# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from itertools import chain

import torch

from fairseq import optim, utils


class DynamicLossScaler(object):

    def __init__(
        self, init_scale=2.**15, scale_factor=2., scale_window=2000,
        tolerance=0.05, threshold=None,
    ):
        self.loss_scale = init_scale
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self.tolerance = tolerance
        self.threshold = threshold
        self._iter = 0
        self._last_overflow_iter = -1
        self._last_rescale_iter = -1
        self._overflows_since_rescale = 0

    def update_scale(self, overflow):
        iter_since_rescale = self._iter - self._last_rescale_iter
        if overflow:
            self._last_overflow_iter = self._iter
            self._overflows_since_rescale += 1
            pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
            if pct_overflow >= self.tolerance:
                self._decrease_loss_scale()
                self._last_rescale_iter = self._iter
                self._overflows_since_rescale = 0
        elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:
            self.loss_scale *= self.scale_factor
            self._last_rescale_iter = self._iter
        self._iter += 1

    def _decrease_loss_scale(self):
        self.loss_scale /= self.scale_factor
        if self.threshold is not None:
            self.loss_scale = max(self.loss_scale, self.threshold)

    @staticmethod
    def has_overflow(grad_norm):
        # detect inf and nan
        if grad_norm == float('inf') or grad_norm != grad_norm:
            return True
        return False


class _FP16OptimizerMixin(object):

    def __init__(self, *args, **kwargs):
        # forward __init__ call to the next class in mro (method resolution order)
        super().__init__(*args, **kwargs)

    @classmethod
    def build_fp32_params(cls, params):
        # create FP32 copy of parameters and grads
        total_param_size = sum(p.data.numel() for p in params)
        fp32_params = params[0].new(0).float().new(total_param_size)
        offset = 0
        for p in params:
            numel = p.data.numel()
            fp32_params[offset:offset+numel].copy_(p.data.view(-1))
            offset += numel
        fp32_params = torch.nn.Parameter(fp32_params)
        fp32_params.grad = fp32_params.data.new(total_param_size)
        return fp32_params

    def state_dict(self):
        """Return the optimizer's state dict."""
        state_dict = self.fp32_optimizer.state_dict()
        state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.

        In general we should prefer the configuration of the existing
        optimizer instance (e.g., learning rate) over that found in the
        state_dict. This allows us to resume training from a checkpoint using
        a new set of optimizer args.
        """
        if 'loss_scale' in state_dict:
            self.scaler.loss_scale = state_dict['loss_scale']
        self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)

    def backward(self, loss):
        """Computes the sum of gradients of the given tensor w.r.t. graph leaves.

        Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
        function additionally dynamically scales the loss to avoid gradient
        underflow.
        """
        loss = loss * self.scaler.loss_scale
        loss.backward()
        self._needs_sync = True

    def _sync_fp16_grads_to_fp32(self, multiply_grads=1.):
        if self._needs_sync:
            # copy FP16 grads to FP32
            offset = 0
            for p in self.fp16_params:
                if not p.requires_grad:
                    continue
                grad_data = p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape)
                numel = grad_data.numel()
                self.fp32_params.grad.data[offset:offset+numel].copy_(grad_data.view(-1))
                offset += numel

            # correct for dynamic loss scaler
            self.fp32_params.grad.data.mul_(multiply_grads / self.scaler.loss_scale)

            self._needs_sync = False

    def multiply_grads(self, c):
        """Multiplies grads by a constant ``c``."""
        if self._needs_sync:
            self._sync_fp16_grads_to_fp32(c)
        else:
            self.fp32_params.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm and updates dynamic loss scaler."""
        self._sync_fp16_grads_to_fp32()
        grad_norm = utils.clip_grad_norm_(self.fp32_params.grad.data, max_norm)

        # detect overflow and adjust loss scale
        overflow = DynamicLossScaler.has_overflow(grad_norm)
        self.scaler.update_scale(overflow)
        if overflow:
            if self.scaler.loss_scale <= self.min_loss_scale:
                # Use FloatingPointError as an uncommon error that parent
                # functions can safely catch to stop training.
                raise FloatingPointError((
                    'Minimum loss scale reached ({}). Your loss is probably exploding. '
                    'Try lowering the learning rate, using gradient clipping or '
                    'increasing the batch size.'
                ).format(self.min_loss_scale))
            raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))
        return grad_norm

    def step(self, closure=None):
        """Performs a single optimization step."""
        self._sync_fp16_grads_to_fp32()
        self.fp32_optimizer.step(closure)

        # copy FP32 params back into FP16 model
        offset = 0
        for p in self.fp16_params:
            if not p.requires_grad:
                continue
            numel = p.data.numel()
            p.data.copy_(self.fp32_params.data[offset:offset+numel].view_as(p.data))
            offset += numel

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        for p in self.fp16_params:
            p.grad = None
        self._needs_sync = False


class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
    """
    Wrap an *optimizer* to support FP16 (mixed precision) training.
    """

    def __init__(self, args, params, fp32_optimizer, fp32_params):
        super().__init__(args)
        self.fp16_params = params
        self.fp32_optimizer = fp32_optimizer
        self.fp32_params = fp32_params

        if getattr(args, 'fp16_scale_window', None) is None:
            if len(args.update_freq) > 1:
                raise ValueError(
                    '--fp16-scale-window must be given explicitly when using a '
                    'custom --update-freq schedule'
                )
            scale_window = 2**14 / args.distributed_world_size / args.update_freq[0]
        else:
            scale_window = args.fp16_scale_window

        self.scaler = DynamicLossScaler(
            init_scale=args.fp16_init_scale,
            scale_window=scale_window,
            tolerance=args.fp16_scale_tolerance,
            threshold=args.threshold_loss_scale,
        )
        self.min_loss_scale = self.args.min_loss_scale

    @classmethod
    def build_optimizer(cls, args, params):
        """
        Args:
            args (argparse.Namespace): fairseq args
            params (iterable): iterable of parameters to optimize
        """
        fp32_params = cls.build_fp32_params(params)
        fp32_optimizer = optim.build_optimizer(args, [fp32_params])
        return cls(args, params, fp32_optimizer, fp32_params)

    @property
    def optimizer(self):
        return self.fp32_optimizer.optimizer

    @property
    def optimizer_config(self):
        return self.fp32_optimizer.optimizer_config

    def get_lr(self):
        return self.fp32_optimizer.get_lr()

    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)


class _MemoryEfficientFP16OptimizerMixin(object):

    def __init__(self, *args, **kwargs):
        # forward __init__ call to the next class in mro (method resolution order)
        super().__init__(*args, **kwargs)

    def state_dict(self):
        """Return the optimizer's state dict."""
        state_dict = self.wrapped_optimizer.state_dict()
        state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.

        In general we should prefer the configuration of the existing
        optimizer instance (e.g., learning rate) over that found in the
        state_dict. This allows us to resume training from a checkpoint using
        a new set of optimizer args.
        """
        if 'loss_scale' in state_dict:
            self.scaler.loss_scale = state_dict['loss_scale']

        self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)

        # Hack: PyTorch automatically casts the optimizer state to match the
        # type of the current parameters. But with --memory-efficient-fp16 the
        # params are FP16 while the optimizer state is FP32 and we don't want
        # to cast. A workaround is to manually copy back the original state
        # after the optimizer has been loaded.
        groups = self.optimizer.param_groups
        saved_groups = state_dict['param_groups']
        id_map = {
            old_id: p
            for old_id, p in zip(
                chain(*(g['params'] for g in saved_groups)),
                chain(*(g['params'] for g in groups))
            )
        }
        for k, v in state_dict['state'].items():
            if k in id_map:
                param = id_map[k]
                self.optimizer.state[param] = v

    def backward(self, loss):
        """Computes the sum of gradients of the given tensor w.r.t. graph leaves.

        Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
        function additionally dynamically scales the loss to avoid gradient
        underflow.
        """
        loss = loss * self.scaler.loss_scale
        loss.backward()
        self._grads_are_scaled = True

    def _unscale_grads(self, multiply_grads=1.):
        if self._grads_are_scaled:
            self._grads_are_scaled = False

            # correct for dynamic loss scaler
            self.wrapped_optimizer.multiply_grads(multiply_grads / self.scaler.loss_scale)
        else:
            assert multiply_grads == 1.

    def multiply_grads(self, c):
        """Multiplies grads by a constant *c*."""
        if self._grads_are_scaled:
            self._unscale_grads(c)
        else:
            self.wrapped_optimizer.multiply_grads(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm and updates dynamic loss scaler."""
        self._unscale_grads()
        grad_norm = self.wrapped_optimizer.clip_grad_norm(max_norm)

        # detect overflow and adjust loss scale
        overflow = DynamicLossScaler.has_overflow(grad_norm)
        self.scaler.update_scale(overflow)
        if overflow:
            if self.scaler.loss_scale <= self.min_loss_scale:
                # Use FloatingPointError as an uncommon error that parent
                # functions can safely catch to stop training.
                raise FloatingPointError((
                    'Minimum loss scale reached ({}). Your loss is probably exploding. '
                    'Try lowering the learning rate, using gradient clipping or '
                    'increasing the batch size.'
                ).format(self.min_loss_scale))
            raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))

        return grad_norm

    def step(self, closure=None):
        """Performs a single optimization step."""
        self._unscale_grads()
        self.wrapped_optimizer.step(closure)

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        self.wrapped_optimizer.zero_grad()
        self._grads_are_scaled = False


class MemoryEfficientFP16Optimizer(_MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer):
    """
    Wrap an *optimizer* to support FP16 (mixed precision) training.

    Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not
    maintain an FP32 copy of the model. We instead expect the optimizer to
    convert the gradients to FP32 internally and sync the results back to the
    FP16 model params. This significantly reduces memory usage but slightly
    increases the time spent in the optimizer.

    Since this wrapper depends on specific functionality in the wrapped
    optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
    optimizers can be wrapped. This is determined by the
    *supports_memory_efficient_fp16* property.
    """

    def __init__(self, args, params, optimizer):
        if not optimizer.supports_memory_efficient_fp16:
            raise ValueError(
                'Unsupported optimizer: {}'.format(optimizer.__class__.__name__)
            )

        super().__init__(args)
        self.wrapped_optimizer = optimizer

        if getattr(args, 'fp16_scale_window', None) is None:
            if len(args.update_freq) > 1:
                raise ValueError(
                    '--fp16-scale-window must be given explicitly when using a '
                    'custom --update-freq schedule'
                )
            scale_window = 2**14 / args.distributed_world_size / args.update_freq[0]
        else:
            scale_window = args.fp16_scale_window

        self.scaler = DynamicLossScaler(
            init_scale=args.fp16_init_scale,
            scale_window=scale_window,
            tolerance=args.fp16_scale_tolerance,
            threshold=args.threshold_loss_scale,
        )
        self.min_loss_scale = self.args.min_loss_scale

    @classmethod
    def build_optimizer(cls, args, params):
        """
        Args:
            args (argparse.Namespace): fairseq args
            params (iterable): iterable of parameters to optimize
        """
        fp16_optimizer = optim.build_optimizer(args, params)
        return cls(args, params, fp16_optimizer)

    @property
    def optimizer(self):
        return self.wrapped_optimizer.optimizer

    @property
    def optimizer_config(self):
        return self.wrapped_optimizer.optimizer_config

    def get_lr(self):
        return self.wrapped_optimizer.get_lr()

    def set_lr(self, lr):
        self.wrapped_optimizer.set_lr(lr)
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/fp16_optimizer.py
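A minimal, standalone sketch of what the DynamicLossScaler above does over time. This is not the class itself: it is a simplified pure-Python mirror that drops the tolerance bookkeeping, driven by a made-up overflow pattern, purely to illustrate the halve-on-overflow / grow-after-a-quiet-window behavior.

# Illustrative only: simplified mirror of update_scale(), with invented inputs.
loss_scale = 2.0 ** 15
scale_factor = 2.0
scale_window = 4                     # small window so growth is visible here
overflows = [False, False, True, False, False, False, False, False]

last_overflow_iter = -1
for it, overflow in enumerate(overflows):
    if overflow:
        # gradients hit inf/nan: shrink the scale (the real class may also skip the step)
        loss_scale /= scale_factor
        last_overflow_iter = it
    elif (it - last_overflow_iter) % scale_window == 0:
        # a full window with no overflow: try a larger scale again
        loss_scale *= scale_factor
    print(it, loss_scale)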
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.optim

from . import FairseqOptimizer, register_optimizer


@register_optimizer('adagrad')
class Adagrad(FairseqOptimizer):
    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
                            help='weight decay')
        # fmt: on

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            'lr': self.args.lr[0],
            'weight_decay': self.args.weight_decay,
        }
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/adagrad.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import types import torch import torch.optim import torch.distributed as dist from . import FairseqOptimizer, register_optimizer @register_optimizer('adam') class FairseqAdam(FairseqOptimizer): """Adam optimizer for fairseq. Important note: this optimizer corresponds to the "AdamW" variant of Adam in its weight decay behavior. As such, it is most closely analogous to torch.optim.AdamW from PyTorch. """ def __init__(self, args, params): super().__init__(args) if torch.cuda.is_available(): try: from apex.optimizers import FusedAdam as _FusedAdam # noqa self._optimizer = FusedAdam(params, **self.optimizer_config) except ImportError: self._optimizer = Adam(params, **self.optimizer_config) else: self._optimizer = Adam(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer') parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D', help='epsilon for Adam optimizer') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'betas': eval(self.args.adam_betas), 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay, } def average_params(self): """Reduce Params is only used during BMUF distributed training.""" state_dict = self.optimizer.state_dict() total_gpus = float(dist.get_world_size()) for _, value in state_dict["state"].items(): value["exp_avg"] /= total_gpus value["exp_avg_sq"] /= total_gpus dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM) dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM) class Adam(torch.optim.Optimizer): """Implements Adam algorithm. This implementation is modified from torch.optim.Adam based on: `Fixed Weight Decay Regularization in Adam` (see https://arxiv.org/abs/1711.05101) It has been proposed in `Adam: A Method for Stochastic Optimization`_. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. 
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False): defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad) super(Adam, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data.float() if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') amsgrad = group['amsgrad'] p_data_fp32 = p.data.float() state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p_data_fp32) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state['max_exp_avg_sq'] = torch.zeros_like(p_data_fp32) else: state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) if amsgrad: state['max_exp_avg_sq'] = state['max_exp_avg_sq'].type_as(p_data_fp32) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(1 - beta1, grad) exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. of gradient denom = max_exp_avg_sq.sqrt().add_(group['eps']) else: denom = exp_avg_sq.sqrt().add_(group['eps']) bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 if group['weight_decay'] != 0: p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) p_data_fp32.addcdiv_(-step_size, exp_avg, denom) p.data.copy_(p_data_fp32) return loss class FusedAdam(torch.optim.Optimizer): """ Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via ``python setup.py install --cuda_ext --cpp_ext``. It has been proposed in `Adam: A Method for Stochastic Optimization`_. Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in FusedAdam! 
eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, lr=1e-3, bias_correction = True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0., max_grad_norm=0., amsgrad=False): global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") if amsgrad: raise RuntimeError('FusedAdam does not support the AMSGrad variant.') defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm) super(FusedAdam, self).__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 @property def supports_memory_efficient_fp16(self): return True def step(self, closure=None, grads=None, scale=1., grad_norms=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) output params (list of tensors, optional): A reduced precision copy of the updated weights written out in addition to the regular updated weights. Have to be of same type as gradients. (default: None) scale (float, optional): factor to divide gradient tensor values by before applying to weights. (default: 1) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None]*len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0])!=list: grads_group = [grads] else: grads_group = grads if grad_norms is None: grad_norms = [None]*len(self.param_groups) for group, grads_this_group, grad_norm in zip(self.param_groups, grads_group, grad_norms): if grads_this_group is None: grads_this_group = [None]*len(group['params']) # compute combined scale factor for this group combined_scale = scale if group['max_grad_norm'] > 0: # norm is in fact norm*scale clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm'] if clip > 1: combined_scale = clip * scale bias_correction = 1 if group['bias_correction'] else 0 for p, grad in zip(group['params'], grads_this_group): #note: p.grad should not ever be set for correct operation of mixed precision optimizer that sometimes sends None gradients if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError('FusedAdam does not support sparse gradients, please consider SparseAdam instead') p_data_fp32 = p.data.float() state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p_data_fp32) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) else: state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) exp_avg = state['exp_avg'] exp_avg_sq = state['exp_avg_sq'] beta1, beta2 = 
group['betas'] state['step'] += 1 out_p = p.data fused_adam_cuda.adam(p_data_fp32, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1, beta2, group['eps'], combined_scale, state['step'], self.eps_mode, bias_correction, group['weight_decay']) return loss
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/adam.py
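The FairseqAdam docstring above notes that its weight decay is the decoupled ("AdamW"-style) variant, applied directly to the parameter rather than folded into the gradient. A purely illustrative scalar sketch of that distinction, with made-up numbers; it is not the optimizer's actual update code.

# Illustrative only (invented scalar values); mirrors the idea, not the real kernel.
lr, wd = 1e-3, 0.01
p, grad, adam_update = 1.0, 0.5, 0.02   # adam_update stands in for step_size * exp_avg / denom

# Coupled L2 penalty would fold the decay into the gradient before the moment estimates:
grad_with_l2 = grad + wd * p

# Decoupled (AdamW-style) decay, as in step() above, shrinks the parameter directly:
p = p - lr * wd * p          # analogous to p_data_fp32.add_(-weight_decay * lr, p_data_fp32)
p = p - adam_update          # then the bias-corrected Adam step is applied
print(grad_with_l2, p)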
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.optim

from . import FairseqOptimizer, register_optimizer


@register_optimizer('adadelta')
class Adadelta(FairseqOptimizer):
    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO',
                            help='coefficient used for computing a running average of squared gradients')
        parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS',
                            help='term added to the denominator to improve numerical stability')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
                            help='weight decay')
        parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps')
        # fmt: on

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            'lr': self.args.lr[0],
            'rho': self.args.adadelta_rho,
            'eps': self.args.adadelta_eps,
            'weight_decay': self.args.weight_decay,
        }
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/adadelta.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('fixed')
class FixedSchedule(FairseqLRScheduler):
    """Decay the LR on a fixed schedule."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)

        # set defaults
        args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0

        self.lr = args.lr[0]
        if args.warmup_updates > 0:
            self.warmup_factor = 1. / args.warmup_updates
        else:
            self.warmup_factor = 1

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        # fmt: off
        parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
                            help='force annealing at specified epoch')
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
                            help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        # fmt: on

    def get_next_lr(self, epoch):
        lrs = self.args.lr
        if self.args.force_anneal is None or epoch < self.args.force_anneal:
            # use fixed LR schedule
            next_lr = lrs[min(epoch, len(lrs) - 1)]
        else:
            # anneal based on lr_shrink
            next_lr = lrs[-1] * self.args.lr_shrink ** (epoch + 1 - self.args.force_anneal)
        return next_lr

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        self.lr = self.get_next_lr(epoch)
        self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if self.args.warmup_updates > 0 and num_updates < self.args.warmup_updates:
            self.warmup_factor = (num_updates + 1) / float(self.args.warmup_updates)
            self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/lr_scheduler/fixed_schedule.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.optim.lr_scheduler

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('reduce_lr_on_plateau')
class ReduceLROnPlateau(FairseqLRScheduler):
    """
    Decay the LR by a factor every time the validation loss plateaus.

    Also comes with an optional warmup phase, where we linearly increase the
    learning rate from some initial learning rate (``--warmup-init-lr``) until
    the configured learning rate (``--lr``). Thereafter the lr is adjusted
    according to the original reduce_on_plateau scheme.

    During warmup::

        lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
        lr = lrs[update_num]
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with reduce_lr_on_plateau.'
                ' Consider --lr-scheduler=fixed instead.'
            )
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer.optimizer, patience=0, factor=args.lr_shrink,
            threshold=args.lr_threshold)
        warmup_end_lr = args.lr[0]

        # if no warmup, set the initial lr to be args.lr[0]
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr

        # linearly warmup for the first args.warmup_updates
        if args.warmup_updates > 0:
            self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates

        # this flag is either set from args when there is no warmup, or set by
        # step_update() when warmup finishes
        self.warmup_end = True if args.warmup_updates <= 0 else False

        # initial learning rate; this self.lr is used only during init and/or
        # the warmup period
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        # fmt: off
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
                            help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
        parser.add_argument('--lr-threshold', default=1e-4, type=float, metavar='LT',
                            help='threshold for measuring the new optimum, '
                                 'to only focus on significant changes')
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')
        # fmt: on

    def state_dict(self):
        """Return the LR scheduler state dict."""
        return {
            'best': self.lr_scheduler.best,
            'last_epoch': self.lr_scheduler.last_epoch,
        }

    def load_state_dict(self, state_dict):
        """Load an LR scheduler state dict."""
        self.lr_scheduler.best = state_dict['best']
        if 'last_epoch' in state_dict:
            self.lr_scheduler.last_epoch = state_dict['last_epoch']

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch if warmup
        has finished; otherwise do not update the lr on epoch boundaries."""
        if val_loss is not None and self.warmup_end is True:
            self.lr_scheduler.step(val_loss, epoch)
        else:
            self.lr_scheduler.last_epoch = epoch
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        # if there is warmup
        if self.args.warmup_updates > 0:
            if num_updates <= self.args.warmup_updates:
                self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
                self.optimizer.set_lr(self.lr)
            else:
                if self.warmup_end is False:
                    self.warmup_end = True
        # else do nothing
        return self.optimizer.get_lr()
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

from fairseq import registry
from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import FairseqLRScheduler


build_lr_scheduler, register_lr_scheduler, LR_SCHEDULER_REGISTRY = registry.setup_registry(
    '--lr-scheduler',
    base_class=FairseqLRScheduler,
    default='fixed',
)

# automatically import any Python files in the optim/lr_scheduler/ directory
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        module = file[:file.find('.py')]
        importlib.import_module('fairseq.optim.lr_scheduler.' + module)
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/lr_scheduler/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('polynomial_decay')
class PolynomialDecaySchedule(FairseqLRScheduler):
    """Decay the LR on a fixed schedule."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)

        # set defaults
        args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0

        self.lr = args.lr[0]
        if args.warmup_updates > 0:
            self.warmup_factor = 1. / args.warmup_updates
        else:
            self.warmup_factor = 1
        self.end_learning_rate = args.end_learning_rate
        self.total_num_update = args.total_num_update
        self.power = args.power
        self.optimizer.set_lr(self.warmup_factor * self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
                            help='force annealing at specified epoch')
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--end-learning-rate', default=0.0, type=float)
        parser.add_argument('--power', default=1.0, type=float)
        parser.add_argument('--total-num-update', default=1000000, type=int)

    def get_next_lr(self, epoch):
        lrs = self.args.lr
        if self.args.force_anneal is None or epoch < self.args.force_anneal:
            # use fixed LR schedule
            next_lr = lrs[min(epoch, len(lrs) - 1)]
        else:
            # anneal based on lr_shrink
            next_lr = self.optimizer.get_lr()
        return next_lr

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        self.lr = self.get_next_lr(epoch)
        self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if self.args.warmup_updates > 0 and num_updates <= self.args.warmup_updates:
            self.warmup_factor = num_updates / float(self.args.warmup_updates)
            lr = self.warmup_factor * self.lr
        elif num_updates >= self.total_num_update:
            lr = self.end_learning_rate
        else:
            warmup = self.args.warmup_updates
            lr_range = self.lr - self.end_learning_rate
            pct_remaining = 1 - (num_updates - warmup) / (self.total_num_update - warmup)
            lr = lr_range * pct_remaining ** (self.power) + self.end_learning_rate
        self.optimizer.set_lr(lr)
        return self.optimizer.get_lr()
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py
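A small, standalone sketch of the curve that the step_update() above produces, with made-up settings (lr=1e-4, 1000 warmup updates, 10000 total updates, power=1). It only mirrors the formula for illustration and does not touch the scheduler class.

# Illustrative only: polynomial decay with linear warmup, evaluated standalone.
def poly_lr(num_updates, lr=1e-4, end_lr=0.0, warmup=1000, total=10000, power=1.0):
    if warmup > 0 and num_updates <= warmup:
        return lr * num_updates / float(warmup)          # linear warmup
    if num_updates >= total:
        return end_lr                                    # floor after total_num_update
    pct_remaining = 1 - (num_updates - warmup) / (total - warmup)
    return (lr - end_lr) * pct_remaining ** power + end_lr

print([round(poly_lr(n), 6) for n in (0, 500, 1000, 5500, 10000)])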
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('inverse_sqrt')
class InverseSquareRootSchedule(FairseqLRScheduler):
    """Decay the LR based on the inverse square root of the update number.

    We also support a warmup phase where we linearly increase the learning
    rate from some initial learning rate (``--warmup-init-lr``) until the
    configured learning rate (``--lr``). Thereafter we decay proportional to
    the number of updates, with a decay factor set to align with the
    configured learning rate.

    During warmup::

        lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
        lr = lrs[update_num]

    After warmup::

        decay_factor = args.lr * sqrt(args.warmup_updates)
        lr = decay_factor / sqrt(update_num)
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with inverse_sqrt.'
                ' Consider --lr-scheduler=fixed instead.'
            )
        warmup_end_lr = args.lr[0]
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr

        # linearly warmup for the first args.warmup_updates
        self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates

        # then, decay prop. to the inverse square root of the update number
        self.decay_factor = warmup_end_lr * args.warmup_updates**0.5

        # initial learning rate
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        # fmt: off
        parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')
        # fmt: on

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.args.warmup_updates:
            self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
        else:
            self.lr = self.decay_factor * num_updates**-0.5
        self.optimizer.set_lr(self.lr)
        return self.lr
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py
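A standalone sketch of the warmup plus inverse-square-root curve described in the docstring above, with made-up settings (lr=5e-4, 4000 warmup updates). Illustration only; the scheduler class itself is the source of truth.

# Illustrative only: linear warmup, then lr * sqrt(warmup_updates) / sqrt(num_updates).
def inverse_sqrt_lr(num_updates, lr=5e-4, warmup_init_lr=1e-7, warmup_updates=4000):
    if num_updates < warmup_updates:
        lr_step = (lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * lr_step
    decay_factor = lr * warmup_updates ** 0.5
    return decay_factor * num_updates ** -0.5

# at num_updates == warmup_updates the two branches meet at exactly lr
print([inverse_sqrt_lr(n) for n in (0, 2000, 4000, 16000, 64000)])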
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .. import FairseqOptimizer


class FairseqLRScheduler(object):

    def __init__(self, args, optimizer):
        super().__init__()
        if not isinstance(optimizer, FairseqOptimizer):
            raise ValueError('optimizer must be an instance of FairseqOptimizer')
        self.args = args
        self.optimizer = optimizer
        self.best = None

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        pass

    def state_dict(self):
        """Return the LR scheduler state dict."""
        return {'best': self.best}

    def load_state_dict(self, state_dict):
        """Load an LR scheduler state dict."""
        self.best = state_dict['best']

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        if val_loss is not None:
            if self.best is None:
                self.best = val_loss
            else:
                self.best = min(self.best, val_loss)

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        return self.optimizer.get_lr()
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('tri_stage')
class TriStageLRSchedule(FairseqLRScheduler):
    """Tri-stage learning rate scheduler.

    Implements the learning rate scheduler in
    https://arxiv.org/pdf/1904.08779.pdf

    Similar to the inverse_sqrt scheduler, but tri_stage learning rate
    scheduling employs three stages:

        - warmup stage, starting from `lr` * `init_lr_scale`, linearly
          increased to `lr` in `warmup_steps` iterations

        - hold stage, after `warmup_steps`, keep the LR as `lr` for
          `hold_steps` iterations

        - decay stage, after the hold stage, decay the LR exponentially to
          `lr` * `final_lr_scale` in `decay_steps`; after that the LR is kept
          at `final_lr_scale` * `lr`

    During warmup::

        init_lr = args.init_lr_scale * args.lr
        lrs = torch.linspace(init_lr, args.lr, args.warmup_steps)
        lr = lrs[update_num]

    During hold::

        lr = args.lr

    During decay::

        decay_factor = - math.log(args.final_lr_scale) / args.decay_steps
        lr = args.lr * exp(- (update_num - warmup_steps - decay_steps) * decay_factor)

    After that::

        lr = args.lr * args.final_lr_scale
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with tri-stage lr.'
                ' Consider --lr-scheduler=fixed instead.'
            )

        # calculate LR at each point
        self.peak_lr = args.lr[0]
        self.init_lr = args.init_lr_scale * args.lr[0]
        self.final_lr = args.final_lr_scale * args.lr[0]

        # remember the steps at each stage
        self.warmup_steps = args.warmup_steps
        self.hold_steps = args.hold_steps
        self.decay_steps = args.decay_steps

        self.warmup_rate = (self.peak_lr - self.init_lr) / self.warmup_steps
        self.decay_factor = -math.log(args.final_lr_scale) / args.decay_steps

        # initial learning rate
        self.lr = self.init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        # fmt: off
        parser.add_argument(
            '--warmup-steps', default=4000, type=int, metavar='N',
            help='warmup the learning rate linearly for the first N updates'
        )
        parser.add_argument(
            '--hold-steps', default=20000, type=int, metavar='N',
            help='steps in hold stage'
        )
        parser.add_argument(
            '--decay-steps', default=60000, type=int, metavar='N',
            help='steps in decay stage'
        )
        parser.add_argument(
            '--init-lr-scale', default=0.01, type=float,
            help='initial learning rate scale during warmup phase; default is 0.01'
        )
        parser.add_argument(
            '--final-lr-scale', default=0.01, type=float,
            help='final learning rate scale; default is 0.01'
        )
        # fmt: on

    def _decide_stage(self, update_step):
        """
        Return the stage, and the corresponding steps within the current stage.
        """
        if update_step < self.warmup_steps:
            # warmup stage
            return 0, update_step

        offset = self.warmup_steps

        if update_step < offset + self.hold_steps:
            # hold stage
            return 1, update_step - offset

        offset += self.hold_steps

        if update_step <= offset + self.decay_steps:
            # decay stage
            return 2, update_step - offset

        offset += self.decay_steps

        # still here? constant lr stage
        return 3, update_step - offset

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        stage, steps_in_stage = self._decide_stage(num_updates)
        if stage == 0:
            self.lr = self.init_lr + self.warmup_rate * steps_in_stage
        elif stage == 1:
            self.lr = self.peak_lr
        elif stage == 2:
            self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage)
        elif stage == 3:
            self.lr = self.final_lr
        else:
            raise ValueError('Undefined stage')

        self.optimizer.set_lr(self.lr)

        return self.lr
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py
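A standalone sketch of the three-stage curve computed by _decide_stage() and step_update() above, with made-up settings. It mirrors the formulas for illustration only and is not the scheduler class itself.

# Illustrative only: warmup -> hold -> exponential decay -> constant floor.
import math

def tri_stage_lr(n, peak=1e-3, init_scale=0.01, final_scale=0.01,
                 warmup=4000, hold=20000, decay=60000):
    init_lr, final_lr = init_scale * peak, final_scale * peak
    if n < warmup:                                   # stage 0: linear warmup
        return init_lr + (peak - init_lr) * n / warmup
    n -= warmup
    if n < hold:                                     # stage 1: hold at peak
        return peak
    n -= hold
    if n <= decay:                                   # stage 2: exponential decay
        decay_factor = -math.log(final_scale) / decay
        return peak * math.exp(-decay_factor * n)
    return final_lr                                  # stage 3: constant floor

print([tri_stage_lr(n) for n in (0, 4000, 20000, 54000, 120000)])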
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('cosine')
class CosineSchedule(FairseqLRScheduler):
    """Assign LR based on a cyclical schedule that follows the cosine function.

    See https://arxiv.org/pdf/1608.03983.pdf for details.

    We also support a warmup phase where we linearly increase the learning
    rate from some initial learning rate (``--warmup-init-lr``) until the
    configured max learning rate (``--max-lr``).

    During warmup::

        lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
        lr = lrs[update_num]

    After warmup::

        lr = lr_min + 0.5*(lr_max - lr_min)*(1 + cos(pi * t_curr / t_i))

    where ``t_curr`` is the number of updates since the start of the current
    period and ``t_i`` is the length of the current period, which is scaled by
    ``t_mult`` after every iteration.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with cosine.'
                ' Consider --lr-scheduler=fixed instead.'
            )

        warmup_end_lr = args.max_lr
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = args.lr[0]

        self.min_lr = args.lr[0]
        self.max_lr = args.max_lr

        assert self.max_lr > self.min_lr, 'max_lr must be more than lr'

        self.t_mult = args.t_mult
        self.period = args.lr_period_updates

        if self.period <= 0:
            assert args.max_update >= 0, 'Either --max_update or --lr-period-updates must be set'
            self.period = args.max_update - args.warmup_updates

        if args.warmup_updates > 0:
            # linearly warmup for the first args.warmup_updates
            self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
        else:
            self.lr_step = 1

        self.warmup_updates = args.warmup_updates
        self.lr_shrink = args.lr_shrink

        # initial learning rate
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        # fmt: off
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')
        parser.add_argument('--max-lr', type=float, metavar='LR',
                            help='max learning rate, must be more than args.lr')
        parser.add_argument('--t-mult', default=1, type=float, metavar='LR',
                            help='factor to grow the length of each period')
        parser.add_argument('--lr-period-updates', default=-1, type=float, metavar='LR',
                            help='initial number of updates per period')
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
                            help='shrink factor for annealing')
        # fmt: on

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.args.warmup_updates:
            self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
        else:
            curr_updates = num_updates - self.args.warmup_updates
            if self.t_mult != 1:
                i = math.floor(math.log(1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult))
                t_i = self.t_mult ** i * self.period
                t_curr = curr_updates - (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period
            else:
                i = math.floor(curr_updates / self.period)
                t_i = self.period
                t_curr = curr_updates - (self.period * i)

            lr_shrink = self.lr_shrink ** i
            min_lr = self.min_lr * lr_shrink
            max_lr = self.max_lr * lr_shrink

            self.lr = min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_curr / t_i))

        self.optimizer.set_lr(self.lr)
        return self.lr
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('triangular')
class TriangularSchedule(FairseqLRScheduler):
    """Assign LR based on a triangular cyclical schedule.

    See https://arxiv.org/pdf/1506.01186.pdf for details.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with triangular.'
                ' Consider --lr-scheduler=fixed instead.'
            )

        lr = args.lr[0]

        assert args.max_lr > lr, 'max_lr must be more than lr'
        self.min_lr = lr
        self.max_lr = args.max_lr
        self.stepsize = args.lr_period_updates // 2
        self.lr_shrink = args.lr_shrink
        self.shrink_min = args.shrink_min

        # initial learning rate
        self.lr = self.min_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        # fmt: off
        parser.add_argument('--max-lr', required=True, type=float, metavar='LR',
                            help='max learning rate, must be more than args.lr')
        parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR',
                            help='initial number of updates per period (cycle length)')
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
                            help='shrink factor for annealing')
        parser.add_argument('--shrink-min', action='store_true',
                            help='if set, also shrinks min lr')
        # fmt: on

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        cycle = math.floor(num_updates / (2 * self.stepsize))

        lr_shrink = self.lr_shrink ** cycle
        max_lr = self.max_lr * lr_shrink
        if self.shrink_min:
            min_lr = self.min_lr * lr_shrink
        else:
            min_lr = self.min_lr

        x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1)
        self.lr = min_lr + (max_lr - min_lr) * max(0, (1 - x))

        self.optimizer.set_lr(self.lr)
        return self.lr
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py
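A standalone sketch of the triangular cycle computed by step_update() above, with made-up settings (min_lr=1e-5, max_lr=1e-3, a 5000-update period, shrink 0.1 per cycle). It mirrors the formula for illustration only.

# Illustrative only: triangular cyclical LR with per-cycle shrink.
import math

def triangular_lr(n, min_lr=1e-5, max_lr=1e-3, period=5000, lr_shrink=0.1, shrink_min=False):
    stepsize = period // 2
    cycle = math.floor(n / (2 * stepsize))
    shrink = lr_shrink ** cycle
    hi = max_lr * shrink
    lo = min_lr * shrink if shrink_min else min_lr
    x = abs(n / stepsize - 2 * (cycle + 1) + 1)
    return lo + (hi - lo) * max(0, 1 - x)

# rises to max_lr mid-cycle, returns to min_lr, then repeats with a smaller peak
print([triangular_lr(n) for n in (0, 1250, 2500, 3750, 5000, 7500)])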
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import List, Optional

import torch
from torch import Tensor


@torch.jit.script
def script_skip_tensor_list(x: List[Tensor], mask):
    res = [xi[mask] if xi.size(0) == mask.size(0) else xi[:, mask] for xi in x]
    outputs = []
    for i, t in enumerate(res):
        if t.numel() != 0:
            outputs.append(t)
        else:
            outputs.append(x[i])
    return outputs


@torch.jit.script
def script_skip_tensor(x: Tensor, mask):
    # None case
    if x.size(0) == 0:
        return x
    res = x[mask] if x.size(0) == mask.size(0) else x[:, mask]
    if res.numel() == 0:
        return x
    else:
        return res


@torch.jit.script
def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int):
    """
    Expand 2D/3D tensor on dim=1
    """
    if x is None:
        return None

    assert x.dim() == 2 or x.dim() == 3
    assert trg_dim >= x.size(1), (trg_dim, x.size())
    if trg_dim == x.size(1):
        return x

    dims = [x.size(0), trg_dim - x.size(1)]
    if x.dim() == 3:
        dims.append(x.size(2))

    x = torch.cat([x, torch.zeros(dims).to(x).fill_(padding_idx)], 1)

    return x


@torch.jit.script
def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor:
    return x if x is not None else y


@torch.jit.script
def fill_tensors(x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int) -> Optional[Tensor]:
    """
    Filling tensor x with y at masked positions (dim=0).
    """
    if x is None or x.size()[0] == 0 or y is None:
        return x
    assert x.dim() == y.dim() and mask.size(0) == x.size(0)
    assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2))

    n_selected = mask.sum()
    if n_selected == 0:
        return x
    assert n_selected == y.size(0)

    if n_selected == x.size(0):
        return y

    if x.size(1) < y.size(1):
        x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx)
        x[mask] = y
    elif x.size(1) > y.size(1):
        x[mask] = torch.tensor(padding_idx).type_as(x)
        if x.dim() == 2:
            x[mask, :y.size(1)] = y
        else:
            x[mask, :y.size(1), :] = y
    else:
        x[mask] = y
    return x
repo_name: data2vec_vision-main
file_path: infoxlm/fairseq/fairseq/models/model_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Base classes for various fairseq models. """ from typing import Dict, List, Optional import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.checkpoint_utils import prune_state_dict from fairseq.data import Dictionary from fairseq.models import FairseqDecoder, FairseqEncoder class BaseFairseqModel(nn.Module): """Base class for fairseq models.""" def __init__(self): super().__init__() self._is_generation_fast = False @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" pass @classmethod def build_model(cls, args, task): """Build a new model instance.""" raise NotImplementedError('Model must implement the build_model method') def get_targets(self, sample, net_output): """Get targets from either the sample or the net's output.""" return sample['target'] def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" if hasattr(self, 'decoder'): return self.decoder.get_normalized_probs(net_output, log_probs, sample) elif torch.is_tensor(net_output): logits = net_output.float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) raise NotImplementedError def extract_features(self, *args, **kwargs): """Similar to *forward* but only return features.""" return self(*args, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return None def load_state_dict(self, state_dict, strict=True, args=None): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. """ self.upgrade_state_dict(state_dict) new_state_dict = prune_state_dict(state_dict, args) return super().load_state_dict(new_state_dict, strict) def upgrade_state_dict(self, state_dict): """Upgrade old state dicts to work with newer code.""" self.upgrade_state_dict_named(state_dict, '') def upgrade_state_dict_named(self, state_dict, name): """Upgrade old state dicts to work with newer code. Args: state_dict (dict): state dictionary to upgrade, in place name (str): the state dict key corresponding to the current module """ assert state_dict is not None def do_upgrade(m, prefix): if len(prefix) > 0: prefix += '.' 
for n, c in m.named_children(): name = prefix + n if hasattr(c, 'upgrade_state_dict_named'): c.upgrade_state_dict_named(state_dict, name) elif hasattr(c, 'upgrade_state_dict'): c.upgrade_state_dict(state_dict) do_upgrade(c, name) do_upgrade(self, name) def make_generation_fast_(self, **kwargs): """Optimize model for faster generation.""" if self._is_generation_fast: return # only apply once self._is_generation_fast = True # remove weight norm from all modules in the network def apply_remove_weight_norm(module): try: nn.utils.remove_weight_norm(module) except ValueError: # this module didn't have weight norm return self.apply(apply_remove_weight_norm) seen = set() def apply_make_generation_fast_(module): if module != self and hasattr(module, 'make_generation_fast_') \ and module not in seen: seen.add(module) module.make_generation_fast_(**kwargs) self.apply(apply_make_generation_fast_) def train(mode=True): if mode: raise RuntimeError('cannot train after make_generation_fast') # this model should no longer be used for training self.eval() self.train = train def prepare_for_onnx_export_(self, **kwargs): """Make model exportable via ONNX trace.""" seen = set() def apply_prepare_for_onnx_export_(module): if module != self and hasattr(module, 'prepare_for_onnx_export_') \ and module not in seen: seen.add(module) module.prepare_for_onnx_export_(**kwargs) self.apply(apply_prepare_for_onnx_export_) @classmethod def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', **kwargs): """ Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model file. Downloads and caches the pre-trained model file if needed. The base implementation returns a :class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to generate translations or sample from language models. The underlying :class:`~fairseq.models.FairseqModel` can be accessed via the *generator.models* attribute. Other models may override this to implement custom hub interfaces. Args: model_name_or_path (str): either the name of a pre-trained model to load or a path/URL to a pre-trained model state dict checkpoint_file (str, optional): colon-separated list of checkpoint files in the model archive to ensemble (default: 'model.pt') data_name_or_path (str, optional): point args.data to the archive at the given path/URL. Can start with '.' or './' to reuse the model archive path. """ from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), **kwargs, ) print(x['args']) return hub_utils.GeneratorHubInterface(x['args'], x['task'], x['models']) @classmethod def hub_models(cls): return {} class FairseqEncoderDecoderModel(BaseFairseqModel): """Base class for encoder-decoder models. Args: encoder (FairseqEncoder): the encoder decoder (FairseqDecoder): the decoder """ def __init__(self, encoder, decoder): super().__init__() self.encoder = encoder self.decoder = decoder assert isinstance(self.encoder, FairseqEncoder) assert isinstance(self.decoder, FairseqDecoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
Then, feed the encoder output and previous decoder outputs (i.e., teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, **kwargs) return decoder_out def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) features = self.decoder.extract_features(prev_output_tokens, encoder_out=encoder_out, **kwargs) return features def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return (self.encoder.max_positions(), self.decoder.max_positions()) def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder.max_positions() class FairseqModel(FairseqEncoderDecoderModel): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) utils.deprecation_warning( 'FairseqModel is deprecated, please use FairseqEncoderDecoderModel ' 'or BaseFairseqModel instead', stacklevel=4, ) class FairseqMultiModel(BaseFairseqModel): """Base class for combining multiple encoder-decoder models.""" def __init__(self, encoders, decoders): super().__init__() assert encoders.keys() == decoders.keys() self.keys = list(encoders.keys()) for key in self.keys: assert isinstance(encoders[key], FairseqEncoder) assert isinstance(decoders[key], FairseqDecoder) self.models = nn.ModuleDict({ key: FairseqModel(encoders[key], decoders[key]) for key in self.keys }) @staticmethod def build_shared_embeddings( dicts: Dict[str, Dictionary], langs: List[str], embed_dim: int, build_embedding: callable, pretrained_embed_path: Optional[str] = None, ): """ Helper function to build shared embeddings for a set of languages after checking that all dicts corresponding to those languages are equivalent. Args: dicts: Dict of lang_id to its corresponding Dictionary langs: languages that we want to share embeddings for embed_dim: embedding dimension build_embedding: callable function to actually build the embedding pretrained_embed_path: Optional path to load pretrained embeddings """ shared_dict = dicts[langs[0]] if any(dicts[lang] != shared_dict for lang in langs): raise ValueError( '--share-*-embeddings requires a joined dictionary: ' '--share-encoder-embeddings requires a joined source ' 'dictionary, --share-decoder-embeddings requires a joined ' 'target dictionary, and --share-all-embeddings requires a ' 'joint source + target dictionary.' 
) return build_embedding( shared_dict, embed_dim, pretrained_embed_path ) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): decoder_outs = {} for key in self.keys: encoder_out = self.models[key].encoder(src_tokens, src_lengths, **kwargs) decoder_outs[key] = self.models[key].decoder( prev_output_tokens, encoder_out, **kwargs, ) return decoder_outs def max_positions(self): """Maximum length supported by the model.""" return { key: (self.models[key].encoder.max_positions(), self.models[key].decoder.max_positions()) for key in self.keys } def max_decoder_positions(self): """Maximum length supported by the decoder.""" return min(model.decoder.max_positions() for model in self.models.values()) @property def encoder(self): return self.models[self.keys[0]].encoder @property def decoder(self): return self.models[self.keys[0]].decoder class FairseqLanguageModel(BaseFairseqModel): """Base class for decoder-only models. Args: decoder (FairseqDecoder): the decoder """ def __init__(self, decoder): super().__init__() self.decoder = decoder assert isinstance(self.decoder, FairseqDecoder) def forward(self, src_tokens, **kwargs): """ Run the forward pass for a decoder-only model. Feeds a batch of tokens through the decoder to predict the next tokens. Args: src_tokens (LongTensor): tokens on which to condition the decoder, of shape `(batch, tgt_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` Returns: tuple: - the decoder's output of shape `(batch, seq_len, vocab)` - a dictionary with any model-specific outputs """ return self.decoder(src_tokens, **kwargs) def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def extract_features(self, src_tokens, **kwargs): """ Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, seq_len, embed_dim)` - a dictionary with any model-specific outputs """ return self.decoder.extract_features(src_tokens, **kwargs) def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return self.decoder.max_positions() def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder.max_positions() @property def supported_targets(self): return {'future'} class FairseqEncoderModel(BaseFairseqModel): """Base class for encoder-only models. Args: encoder (FairseqEncoder): the encoder """ def __init__(self, encoder): super().__init__() self.encoder = encoder assert isinstance(self.encoder, FairseqEncoder) def forward(self, src_tokens, src_lengths, **kwargs): """ Run the forward pass for a encoder-only model. Feeds a batch of tokens through the encoder to generate features. 
Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` Returns: the encoder's output, typically of shape `(batch, src_len, features)` """ return self.encoder(src_tokens, src_lengths, **kwargs) def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" encoder_out = net_output['encoder_out'] if torch.is_tensor(encoder_out): logits = encoder_out.float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) raise NotImplementedError def max_positions(self): """Maximum length supported by the model.""" return self.encoder.max_positions()
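# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original file):
# wiring the abstract classes above into a working encoder-decoder model.
# ToyEncoder/ToyDecoder and the vocabulary/embedding sizes are hypothetical.
# ---------------------------------------------------------------------------
import torch
import torch.nn as nn

from fairseq.models import FairseqDecoder, FairseqEncoder, FairseqEncoderDecoderModel


class ToyEncoder(FairseqEncoder):
    def __init__(self, dictionary, vocab=100, dim=16):
        super().__init__(dictionary)
        self.embed = nn.Embedding(vocab, dim)

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        # the encoder output format is model-specific; a dict is the usual convention
        return {'encoder_out': self.embed(src_tokens)}

    def reorder_encoder_out(self, encoder_out, new_order):
        encoder_out['encoder_out'] = encoder_out['encoder_out'].index_select(0, new_order)
        return encoder_out


class ToyDecoder(FairseqDecoder):
    def __init__(self, dictionary, vocab=100, dim=16):
        super().__init__(dictionary)
        self.embed = nn.Embedding(vocab, dim)
        self.proj = nn.Linear(dim, vocab)

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        # a real decoder would attend over encoder_out; this sketch only embeds targets
        return self.embed(prev_output_tokens), {}

    def output_layer(self, features, **kwargs):
        return self.proj(features)


if __name__ == '__main__':
    # the base classes only store `dictionary`, so None is acceptable for this sketch
    model = FairseqEncoderDecoderModel(ToyEncoder(None), ToyDecoder(None))
    src = torch.randint(0, 100, (2, 7))
    prev = torch.randint(0, 100, (2, 5))
    logits, _ = model(src, src_lengths=torch.tensor([7, 7]), prev_output_tokens=prev)
    assert logits.shape == (2, 5, 100)  # (batch, tgt_len, vocab)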
data2vec_vision-main
infoxlm/fairseq/fairseq/models/fairseq_model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn class FairseqEncoder(nn.Module): """Base class for encoders.""" def __init__(self, dictionary): super().__init__() self.dictionary = dictionary def forward(self, src_tokens, src_lengths=None, **kwargs): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` """ raise NotImplementedError def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to `new_order`. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: `encoder_out` rearranged according to `new_order` """ raise NotImplementedError def max_positions(self): """Maximum input length supported by the encoder.""" return 1e6 # an arbitrary large number def upgrade_state_dict(self, state_dict): """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict
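# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original file): what `new_order` in
# reorder_encoder_out() typically looks like during beam search -- each source
# sentence is repeated once per beam hypothesis. Shapes below are made up.
# ---------------------------------------------------------------------------
import torch

batch_size, beam_size = 2, 3
new_order = torch.arange(batch_size).repeat_interleave(beam_size)  # tensor([0, 0, 0, 1, 1, 1])
encoder_states = torch.randn(batch_size, 7, 16)                    # (batch, src_len, dim)
expanded = encoder_states.index_select(0, new_order)               # (batch * beam, src_len, dim)
assert expanded.shape == (batch_size * beam_size, 7, 16)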
data2vec_vision-main
infoxlm/fairseq/fairseq/models/fairseq_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import os import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq.models import ( CompositeEncoder, FairseqDecoder, FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( DownsampledMultiHeadAttention, GradMultiply, LayerNorm, LearnedPositionalEmbedding, LinearizedConvolution, ) @register_model('fconv_self_att') class FConvModelSelfAtt(FairseqEncoderDecoderModel): @classmethod def hub_models(cls): return { 'conv.stories.pretrained': { 'path': 'https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz', 'checkpoint_file': 'pretrained_checkpoint.pt', 'tokenizer': 'nltk', }, 'conv.stories': { 'path': 'https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz', 'checkpoint_file': 'fusion_checkpoint.pt', 'tokenizer': 'nltk', 'pretrained': 'True', 'pretrained_checkpoint': './pretrained_checkpoint.pt', }, # Test set containing dictionaries 'data.stories': 'https://dl.fbaipublicfiles.com/fairseq/data/stories_test.tar.bz2', } def __init__(self, encoder, decoder, pretrained_encoder=None): super().__init__(encoder, decoder) self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention) self.pretrained_encoder = pretrained_encoder if self.pretrained_encoder is None: encoders = {'encoder': encoder} else: encoders = {'encoder': encoder, 'pretrained': self.pretrained_encoder} # for fusion model, CompositeEncoder contains both pretrained and training encoders # these are forwarded and then combined in the decoder self.encoder = CompositeEncoder(encoders) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') parser.add_argument('--self-attention', type=str, metavar='EXPR', help='decoder self-attention layers, ex: [True] + [False]*5') parser.add_argument('--multihead-attention-nheads', type=int, help='Number of heads to use in attention') parser.add_argument('--multihead-self-attention-nheads', type=int, help='Number of heads to use in self-attention') parser.add_argument('--encoder-attention', type=str, metavar='EXPR', help='encoder attention [True, ...]') parser.add_argument('--encoder-attention-nheads', type=int, help='Number of heads to use in encoder attention') parser.add_argument('--project-input', type=str, metavar='EXPR', help='Use projections in self-attention [True, ...]') parser.add_argument('--gated-attention', type=str, metavar='EXPR', help='Use GLU layers in self-attention projections [True, ...]') parser.add_argument('--downsample', type=str, metavar='EXPR', 
help='Use downsampling in self-attention [True, ...]') parser.add_argument('--pretrained-checkpoint', metavar='DIR', help='path to load checkpoint from pretrained model') parser.add_argument('--pretrained', type=str, metavar='EXPR', help='use pretrained model when training [True, ...]') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" trained_encoder, trained_decoder = None, None pretrained = eval(args.pretrained) if pretrained: print("| loading pretrained model") if not os.path.exists(args.pretrained_checkpoint): new_pretrained_checkpoint = os.path.join(args.data, args.pretrained_checkpoint) if os.path.exists(new_pretrained_checkpoint): args.pretrained_checkpoint = new_pretrained_checkpoint trained_model = checkpoint_utils.load_model_ensemble( filenames=[args.pretrained_checkpoint], task=task, )[0][0] trained_decoder = list(trained_model.children())[1] trained_encoder = list(trained_model.children())[0] # freeze pretrained model for param in trained_decoder.parameters(): param.requires_grad = False for param in trained_encoder.parameters(): param.requires_grad = False encoder = FConvEncoder( task.source_dictionary, embed_dim=args.encoder_embed_dim, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, attention=eval(args.encoder_attention), attention_nheads=args.encoder_attention_nheads ) decoder = FConvDecoder( task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, selfattention=eval(args.self_attention), attention_nheads=args.multihead_attention_nheads, selfattention_nheads=args.multihead_self_attention_nheads, project_input=eval(args.project_input), gated_attention=eval(args.gated_attention), downsample=eval(args.downsample), pretrained=pretrained, trained_decoder=trained_decoder ) model = FConvModelSelfAtt(encoder, decoder, trained_encoder) return model @property def pretrained(self): return self.pretrained_encoder is not None class FConvEncoder(FairseqEncoder): """Convolutional encoder""" def __init__( self, dictionary, embed_dim=512, max_positions=1024, convolutions=((512, 3),) * 20, dropout=0.1, attention=False, attention_nheads=1, ): super().__init__(dictionary) self.dropout = dropout self.num_attention_layers = None num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, self.padding_idx, ) def expand_bool_array(val): if isinstance(val, bool): # expand True into [True, True, ...] 
and do the same with False return [val] * len(convolutions) return val attention = expand_bool_array(attention) in_channels = convolutions[0][0] self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.attproj = nn.ModuleList() for i, (out_channels, kernel_size) in enumerate(convolutions): self.projections.append( Linear(in_channels, out_channels) if in_channels != out_channels else None ) self.convolutions.append( ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout) ) self.attention.append( SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None ) in_channels = out_channels self.fc2 = Linear(in_channels, embed_dim) def forward(self, src_tokens, src_lengths): # embed tokens and positions x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) input_embedding = x.transpose(0, 1) # project to size of convolution x = self.fc1(x) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B if not encoder_padding_mask.any(): encoder_padding_mask = None # B x T x C -> T x B x C x = x.transpose(0, 1) # temporal convolutions for proj, conv, attention in zip(self.projections, self.convolutions, self.attention): residual = x if proj is None else proj(x) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) x = F.dropout(x, p=self.dropout, training=self.training) padding_l = (conv.kernel_size[0] - 1) // 2 padding_r = conv.kernel_size[0] // 2 x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) x = conv(x) x = F.glu(x, dim=2) if attention is not None: x = attention(x) x = (x + residual) * math.sqrt(0.5) # T x B x C -> B x T x C x = x.transpose(1, 0) # project back to size of embedding x = self.fc2(x) if encoder_padding_mask is not None: encoder_padding_mask = encoder_padding_mask.t() # -> B x T x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) # scale gradients (this only affects backward, not forward) x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) # add output to input embedding for attention y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5) return { 'encoder_out': (x, y), 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): encoder_out['encoder_out'] = tuple( eo.index_select(0, new_order) for eo in encoder_out['encoder_out'] ) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if 'pretrained' in encoder_out: encoder_out['pretrained']['encoder_out'] = tuple( eo.index_select(0, new_order) for eo in encoder_out['pretrained']['encoder_out'] ) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return self.embed_positions.max_positions() class FConvDecoder(FairseqDecoder): """Convolutional decoder""" def __init__( self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 8, attention=True, dropout=0.1, selfattention=False, attention_nheads=1, selfattention_nheads=1, project_input=False, gated_attention=False, downsample=False, pretrained=False, trained_decoder=None, ): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([2])) self.pretrained = pretrained self.pretrained_decoder = trained_decoder self.dropout = dropout self.need_attn = True 
in_channels = convolutions[0][0] def expand_bool_array(val): if isinstance(val, bool): # expand True into [True, True, ...] and do the same with False return [val] * len(convolutions) return val attention = expand_bool_array(attention) selfattention = expand_bool_array(selfattention) if not isinstance(attention, list) or len(attention) != len(convolutions): raise ValueError('Attention is expected to be a list of booleans of ' 'length equal to the number of layers.') num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, padding_idx, ) self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.selfattention = nn.ModuleList() self.attproj = nn.ModuleList() for i, (out_channels, kernel_size) in enumerate(convolutions): self.projections.append( Linear(in_channels, out_channels) if in_channels != out_channels else None ) self.convolutions.append( LinearizedConv1d( in_channels, out_channels * 2, kernel_size, padding=(kernel_size - 1), dropout=dropout, ) ) self.attention.append( DownsampledMultiHeadAttention( out_channels, embed_dim, attention_nheads, project_input=project_input, gated=False, downsample=False, ) if attention[i] else None ) self.attproj.append( Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None ) self.selfattention.append( SelfAttention( out_channels, embed_dim, selfattention_nheads, project_input=project_input, gated=gated_attention, downsample=downsample, ) if selfattention[i] else None ) in_channels = out_channels self.fc2 = Linear(in_channels, out_embed_dim) self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout) # model fusion if self.pretrained: # independent gates are learned from the concatenated input self.gate1 = nn.Sequential(Linear(out_embed_dim*2, out_embed_dim), nn.Sigmoid()) self.gate2 = nn.Sequential(Linear(out_embed_dim*2, out_embed_dim), nn.Sigmoid()) # pretrained and trained models are joined self.joining = nn.Sequential( Linear(out_embed_dim*2, out_embed_dim*2), LayerNorm(out_embed_dim*2), nn.GLU(), Linear(out_embed_dim, out_embed_dim*2), LayerNorm(out_embed_dim*2), nn.GLU(), Linear(out_embed_dim, out_embed_dim), LayerNorm(out_embed_dim) ) # pretrained model contains an output layer that is nhid -> vocab size # but the models are combined in their hidden state # the hook stores the output of the pretrained model forward self.pretrained_outputs = {} def save_output(): def hook(a, b, output): self.pretrained_outputs["out"] = output return hook self.pretrained_decoder.fc2.register_forward_hook(save_output()) def forward(self, prev_output_tokens, encoder_out): trained_encoder_out = encoder_out['pretrained'] if self.pretrained else None encoder_out = encoder_out['encoder']['encoder_out'] encoder_a, encoder_b = self._split_encoder_out(encoder_out) # embed positions positions = self.embed_positions(prev_output_tokens) # embed tokens and positions x = self.embed_tokens(prev_output_tokens) + positions x = F.dropout(x, p=self.dropout, training=self.training) target_embedding = x.transpose(0, 1) # project to size of convolution x = self.fc1(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # temporal convolutions avg_attn_scores = None for proj, conv, attention, selfattention, attproj in zip( self.projections, self.convolutions, self.attention, self.selfattention, self.attproj ): 
residual = x if proj is None else proj(x) x = F.dropout(x, p=self.dropout, training=self.training) x = conv(x) x = F.glu(x, dim=2) # attention if attention is not None: r = x x, attn_scores = attention(attproj(x) + target_embedding, encoder_a, encoder_b) x = x + r if not self.training and self.need_attn: if avg_attn_scores is None: avg_attn_scores = attn_scores else: avg_attn_scores.add_(attn_scores) if selfattention is not None: x = selfattention(x) x = (x + residual) * math.sqrt(0.5) # T x B x C -> B x T x C x = x.transpose(0, 1) # project back to size of vocabulary x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) if not self.pretrained: x = self.fc3(x) # fusion gating if self.pretrained: trained_x, _ = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out) y = torch.cat([x, self.pretrained_outputs["out"]], dim=-1) gate1 = self.gate1(y) gate2 = self.gate2(y) gated_x1 = gate1 * x gated_x2 = gate2 * self.pretrained_outputs["out"] fusion = torch.cat([gated_x1, gated_x2], dim=-1) fusion = self.joining(fusion) fusion_output = self.fc3(fusion) return fusion_output, avg_attn_scores else: return x, avg_attn_scores def max_positions(self): """Maximum output length supported by the decoder.""" return self.embed_positions.max_positions() def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def _split_encoder_out(self, encoder_out): """Split and transpose encoder outputs.""" # transpose only once to speed up attention layers encoder_a, encoder_b = encoder_out encoder_a = encoder_a.transpose(0, 1).contiguous() encoder_b = encoder_b.transpose(0, 1).contiguous() result = (encoder_a, encoder_b) return result class SelfAttention(nn.Module): def __init__(self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False): super().__init__() self.attention = DownsampledMultiHeadAttention( out_channels, embed_dim, num_heads, dropout=0, bias=True, project_input=project_input, gated=gated, downsample=downsample, ) self.in_proj_q = Linear(out_channels, embed_dim) self.in_proj_k = Linear(out_channels, embed_dim) self.in_proj_v = Linear(out_channels, embed_dim) self.ln = LayerNorm(out_channels) def forward(self, x): residual = x query = self.in_proj_q(x) key = self.in_proj_k(x) value = self.in_proj_v(x) x, _ = self.attention(query, key, value, mask_future_timesteps=True, use_scalar_bias=True) return self.ln(x + residual) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) m.weight.data.normal_(0, 0.1) return m def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) m.weight.data.normal_(0, 0.1) return m def Linear(in_features, out_features, dropout=0.): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features) m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features)) m.bias.data.zero_() return m def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0., **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) m.weight.data.normal_(mean=0, std=std) m.bias.data.zero_() return m def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs): """Weight-normalized Conv1d layer""" from fairseq.modules import 
ConvTBC m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) m.weight.data.normal_(mean=0, std=std) m.bias.data.zero_() return m @register_model_architecture('fconv_self_att', 'fconv_self_att') def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 3') args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 8') args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) args.decoder_attention = getattr(args, 'decoder_attention', 'True') args.self_attention = getattr(args, 'self_attention', 'False') args.encoder_attention = getattr(args, 'encoder_attention', 'False') args.multihead_attention_nheads = getattr(args, 'multihead_attention_nheads', 1) args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 1) args.encoder_attention_nheads = getattr(args, 'encoder_attention_nheads', 1) args.project_input = getattr(args, 'project_input', 'False') args.gated_attention = getattr(args, 'gated_attention', 'False') args.downsample = getattr(args, 'downsample', 'False') args.pretrained_checkpoint = getattr(args, 'pretrained_checkpoint', '') args.pretrained = getattr(args, 'pretrained', 'False') @register_model_architecture('fconv_self_att', 'fconv_self_att_wp') def fconv_self_att_wp(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256) args.encoder_layers = getattr(args, 'encoder_layers', '[(128, 3)] * 2 + [(512,3)] * 1') args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256) args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1') args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) args.self_attention = getattr(args, 'self_attention', 'True') args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 4) args.project_input = getattr(args, 'project_input', 'True') args.gated_attention = getattr(args, 'gated_attention', 'True') args.downsample = getattr(args, 'downsample', 'True') base_architecture(args)
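# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the original file): the layer
# options above ('--encoder-layers', '--decoder-layers', '--self-attention',
# ...) are Python expressions that build_model passes through eval(). The two
# values below are taken from base_architecture and the --self-attention help
# text.
# ---------------------------------------------------------------------------
encoder_layers = eval('[(512, 3)] * 3')        # list of (out_channels, kernel_size)
assert encoder_layers == [(512, 3), (512, 3), (512, 3)]

self_attention = eval('[True] + [False]*5')    # one flag per decoder layer
assert self_attention == [True] + [False] * 5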
data2vec_vision-main
infoxlm/fairseq/fairseq/models/fconv_self_att.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import options from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.fconv import FConvDecoder @register_model('fconv_lm') class FConvLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion') parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_lm_architecture(args) if hasattr(args, 'max_target_positions') and not hasattr(args, 'tokens_per_sample'): args.tokens_per_sample = args.max_target_positions decoder = FConvDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.tokens_per_sample, share_embed=False, positional_embeddings=False, adaptive_softmax_cutoff=( options.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == 'adaptive_loss' else None ), adaptive_softmax_dropout=args.adaptive_softmax_dropout, ) return FConvLanguageModel(decoder) @register_model_architecture('fconv_lm', 'fconv_lm') def base_lm_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128) args.decoder_layers = getattr(args, 'decoder_layers', '[(1268, 4)] * 13') args.decoder_attention = getattr(args, 'decoder_attention', 'False') args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) @register_model_architecture('fconv_lm', 'fconv_lm_dauphin_wikitext103') def fconv_lm_dauphin_wikitext103(args): layers = '[(850, 6)] * 3' layers += ' + [(850, 1)] * 1' layers += ' + [(850, 5)] * 4' layers += ' + [(850, 1)] * 1' layers += ' + [(850, 4)] * 3' layers += ' + [(1024, 4)] * 1' layers += ' + [(2048, 4)] * 1' args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 280) args.decoder_layers = getattr(args, 'decoder_layers', layers) args.decoder_attention = getattr(args, 'decoder_attention', 'False') args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,20000,200000') base_lm_architecture(args) @register_model_architecture('fconv_lm', 'fconv_lm_dauphin_gbw') def fconv_lm_dauphin_gbw(args): layers = '[(512, 5)]' layers += ' + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 
3' layers += ' + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3' layers += ' + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6' layers += ' + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]' args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128) args.decoder_layers = getattr(args, 'decoder_layers', layers) args.decoder_attention = getattr(args, 'decoder_attention', 'False') args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000') base_lm_architecture(args)
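# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the original file): the
# --adaptive-softmax-cutoff option is a comma-separated string; build_model
# above only parses it (via options.eval_str_list, as called in this file)
# when training with the adaptive_loss criterion, otherwise the decoder is
# built with cutoff=None and uses a full softmax output layer.
# ---------------------------------------------------------------------------
from fairseq import options

cutoff = options.eval_str_list('10000,50000,200000', type=int)
assert cutoff == [10000, 50000, 200000]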
data2vec_vision-main
infoxlm/fairseq/fairseq/models/fconv_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict from fairseq import utils from fairseq.models import ( FairseqMultiModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( base_architecture, Embedding, TransformerModel, TransformerEncoder, TransformerDecoder, ) @register_model('multilingual_transformer') class MultilingualTransformerModel(FairseqMultiModel): """Train Transformer models for multiple language pairs simultaneously. Requires `--task multilingual_translation`. We inherit all arguments from TransformerModel and assume that all language pairs use a single Transformer architecture. In addition, we provide several options that are specific to the multilingual setting. Args: --share-encoder-embeddings: share encoder embeddings across all source languages --share-decoder-embeddings: share decoder embeddings across all target languages --share-encoders: share all encoder params (incl. embeddings) across all source languages --share-decoders: share all decoder params (incl. embeddings) across all target languages """ def __init__(self, encoders, decoders): super().__init__(encoders, decoders) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" TransformerModel.add_args(parser) parser.add_argument('--share-encoder-embeddings', action='store_true', help='share encoder embeddings across languages') parser.add_argument('--share-decoder-embeddings', action='store_true', help='share decoder embeddings across languages') parser.add_argument('--share-encoders', action='store_true', help='share encoders across languages') parser.add_argument('--share-decoders', action='store_true', help='share decoders across languages') @classmethod def build_model(cls, args, task): """Build a new model instance.""" from fairseq.tasks.multilingual_translation import MultilingualTranslationTask assert isinstance(task, MultilingualTranslationTask) # make sure all arguments are present in older models base_multilingual_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = 1024 if not hasattr(args, 'max_target_positions'): args.max_target_positions = 1024 src_langs = [lang_pair.split('-')[0] for lang_pair in task.model_lang_pairs] tgt_langs = [lang_pair.split('-')[1] for lang_pair in task.model_lang_pairs] if args.share_encoders: args.share_encoder_embeddings = True if args.share_decoders: args.share_decoder_embeddings = True def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb # build shared embeddings (if applicable) shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None if args.share_all_embeddings: if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=task.langs, 
embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path, ) shared_decoder_embed_tokens = shared_encoder_embed_tokens args.share_decoder_input_output_embed = True else: if args.share_encoder_embeddings: shared_encoder_embed_tokens = ( FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=src_langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path, ) ) if args.share_decoder_embeddings: shared_decoder_embed_tokens = ( FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=tgt_langs, embed_dim=args.decoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.decoder_embed_path, ) ) # encoders/decoders for each language lang_encoders, lang_decoders = {}, {} def get_encoder(lang): if lang not in lang_encoders: if shared_encoder_embed_tokens is not None: encoder_embed_tokens = shared_encoder_embed_tokens else: encoder_embed_tokens = build_embedding( task.dicts[lang], args.encoder_embed_dim, args.encoder_embed_path ) lang_encoders[lang] = TransformerEncoder(args, task.dicts[lang], encoder_embed_tokens) return lang_encoders[lang] def get_decoder(lang): if lang not in lang_decoders: if shared_decoder_embed_tokens is not None: decoder_embed_tokens = shared_decoder_embed_tokens else: decoder_embed_tokens = build_embedding( task.dicts[lang], args.decoder_embed_dim, args.decoder_embed_path ) lang_decoders[lang] = TransformerDecoder(args, task.dicts[lang], decoder_embed_tokens) return lang_decoders[lang] # shared encoders/decoders (if applicable) shared_encoder, shared_decoder = None, None if args.share_encoders: shared_encoder = get_encoder(src_langs[0]) if args.share_decoders: shared_decoder = get_decoder(tgt_langs[0]) encoders, decoders = OrderedDict(), OrderedDict() for lang_pair, src, tgt in zip(task.model_lang_pairs, src_langs, tgt_langs): encoders[lang_pair] = shared_encoder if shared_encoder is not None else get_encoder(src) decoders[lang_pair] = shared_decoder if shared_decoder is not None else get_decoder(tgt) return MultilingualTransformerModel(encoders, decoders) def load_state_dict(self, state_dict, strict=True): state_dict_subset = state_dict.copy() for k, _ in state_dict.items(): assert k.startswith('models.') lang_pair = k.split('.')[1] if lang_pair not in self.models: del state_dict_subset[k] super().load_state_dict(state_dict_subset, strict=strict) @register_model_architecture('multilingual_transformer', 'multilingual_transformer') def base_multilingual_architecture(args): base_architecture(args) args.share_encoder_embeddings = getattr(args, 'share_encoder_embeddings', False) args.share_decoder_embeddings = getattr(args, 'share_decoder_embeddings', False) args.share_encoders = getattr(args, 'share_encoders', False) args.share_decoders = getattr(args, 'share_decoders', False) @register_model_architecture('multilingual_transformer', 'multilingual_transformer_iwslt_de_en') def multilingual_transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = 
getattr(args, 'decoder_layers', 6) base_multilingual_architecture(args)
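# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the original file): how
# build_model above derives per-side language lists from the task's language
# pairs before deciding what can be shared. The pair list is a hypothetical
# example, not a real task configuration.
# ---------------------------------------------------------------------------
model_lang_pairs = ['de-en', 'fr-en']
src_langs = [lang_pair.split('-')[0] for lang_pair in model_lang_pairs]  # ['de', 'fr']
tgt_langs = [lang_pair.split('-')[1] for lang_pair in model_lang_pairs]  # ['en', 'en']
assert tgt_langs == ['en', 'en']
# --share-decoder-embeddings is only safe here because both pairs target 'en';
# FairseqMultiModel.build_shared_embeddings additionally checks that the target
# dictionaries are identical before building a single shared Embedding.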
data2vec_vision-main
infoxlm/fairseq/fairseq/models/multilingual_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.models import register_model, register_model_architecture from fairseq.models.nonautoregressive_transformer import NATransformerModel def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1): # s: input batch # V: vocabulary size rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device) choices = torch.rand(size=s.size(), device=s.device) choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1) replace = choices < beta / 3 repeat = (choices >= beta / 3) & (choices < beta * 2 / 3) swap = (choices >= beta * 2 / 3) & (choices < beta) safe = choices >= beta for i in range(s.size(1) - 1): rand_word = rand_words[:, i] next_word = s[:, i + 1] self_word = s[:, i] replace_i = replace[:, i] swap_i = swap[:, i] & (next_word != 3) repeat_i = repeat[:, i] & (next_word != 3) safe_i = safe[:, i] | ((next_word == 3) & (~replace_i)) s[:, i] = ( self_word * (safe_i | repeat_i).long() + next_word * swap_i.long() + rand_word * replace_i.long() ) s[:, i + 1] = ( next_word * (safe_i | replace_i).long() + self_word * (swap_i | repeat_i).long() ) return s def gumbel_noise(input, TINY=1e-8): return input.new_zeros(*input.size()).uniform_().add_( TINY).log_().neg_().add_(TINY).log_().neg_() @register_model("iterative_nonautoregressive_transformer") class IterNATransformerModel(NATransformerModel): @staticmethod def add_args(parser): NATransformerModel.add_args(parser) parser.add_argument("--train-step", type=int, help="number of refinement iterations during training") parser.add_argument("--dae-ratio", type=float, help="the probability of switching to the denoising auto-encoder loss") parser.add_argument("--stochastic-approx", action="store_true", help="sampling from the decoder as the inputs for next iteration") @classmethod def build_model(cls, args, task): model = super().build_model(args, task) model.train_step = getattr(args, "train_step", 4) model.dae_ratio = getattr(args, "dae_ratio", 0.5) model.stochastic_approx = getattr(args, "stochastic_approx", False) return model def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): B, T = prev_output_tokens.size() # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) length_out, length_tgt = self.decoder.forward_length_prediction( encoder_out, tgt_tokens ) word_ins_outs, word_ins_tgts, word_ins_masks = [], [], [] for t in range(self.train_step): word_ins_out, word_ins_tgt, word_ins_mask = self.decoder( prev_output_tokens, encoder_out=encoder_out, tgt_tokens=tgt_tokens, step=t, ) word_ins_outs.append(word_ins_out) word_ins_tgts.append(word_ins_tgt) word_ins_masks.append(word_ins_mask) if t < (self.train_step - 1): # prediction for next iteration if self.stochastic_approx: word_ins_prediction = ( word_ins_out + gumbel_noise(word_ins_out) ).max(-1)[1] else: word_ins_prediction = word_ins_out.max(-1)[1] prev_output_tokens = prev_output_tokens.masked_scatter( word_ins_mask, word_ins_prediction[word_ins_mask] ) if self.dae_ratio > 0: # we do not perform denoising for the first iteration corrputed = ( torch.rand(size=(B,), device=prev_output_tokens.device) < self.dae_ratio ) corrputed_tokens = _sequential_poisoning( tgt_tokens[corrputed], len(self.tgt_dict), 0.33, self.bos, self.eos, self.pad, ) prev_output_tokens[corrputed] = corrputed_tokens # concat everything word_ins_out = 
torch.cat(word_ins_outs, 0) word_ins_tgt = torch.cat(word_ins_tgts, 0) word_ins_mask = torch.cat(word_ins_masks, 0) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor } } @register_model_architecture( "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer" ) def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.ngram_predictor = getattr(args, "ngram_predictor", 1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) args.train_step = getattr(args, "train_step", 4) args.dae_ratio = getattr(args, "dae_ratio", 0.5) args.stochastic_approx = getattr(args, "stochastic_approx", False) @register_model_architecture( "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer_wmt_en_de", ) def iter_nat_wmt_en_de(args): base_architecture(args)
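# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the original file): the
# --stochastic-approx path above samples next-iteration tokens by adding
# Gumbel noise to the logits and taking the argmax (effectively the Gumbel-max
# trick). Shapes and values below are made up.
# ---------------------------------------------------------------------------
import torch

from fairseq.models.iterative_nonautoregressive_transformer import gumbel_noise

word_ins_out = torch.randn(2, 5, 11)                      # (batch, length, vocab) logits
sampled = (word_ins_out + gumbel_noise(word_ins_out)).max(-1)[1]
greedy = word_ins_out.max(-1)[1]                          # the deterministic alternative
assert sampled.shape == greedy.shape == (2, 5)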
data2vec_vision-main
infoxlm/fairseq/fairseq/models/iterative_nonautoregressive_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from fairseq import utils class FairseqDecoder(nn.Module): """Base class for decoders.""" def __init__(self, dictionary): super().__init__() self.dictionary = dictionary self.onnx_trace = False def forward(self, prev_output_tokens, encoder_out=None, **kwargs): """ Args: prev_output_tokens (LongTensor): shifted output tokens of shape `(batch, tgt_len)`, for teacher forcing encoder_out (dict, optional): output from the encoder, used for encoder-side attention Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out=encoder_out, **kwargs) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs): """ Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ raise NotImplementedError def output_layer(self, features, **kwargs): """ Project features to the default output size, e.g., vocabulary size. Args: features (Tensor): features returned by *extract_features*. """ raise NotImplementedError def get_normalized_probs(self, net_output, log_probs, sample): """Get normalized probabilities (or log probs) from a net's output.""" if hasattr(self, 'adaptive_softmax') and self.adaptive_softmax is not None: if sample is not None: assert 'target' in sample target = sample['target'] else: target = None out = self.adaptive_softmax.get_log_prob(net_output[0], target=target) return out.exp_() if not log_probs else out logits = net_output[0] if log_probs: return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace) else: return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace) def max_positions(self): """Maximum input length supported by the decoder.""" return 1e6 # an arbitrary large number def upgrade_state_dict(self, state_dict): """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict def prepare_for_onnx_export_(self): self.onnx_trace = True
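# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the original file): the
# (scores, extra) tuple convention that get_normalized_probs above relies on
# when no adaptive softmax is attached. Shapes are made up.
# ---------------------------------------------------------------------------
import torch
import torch.nn.functional as F

logits = torch.randn(2, 4, 10)            # what output_layer() would return
net_output = (logits, {'attn': None})     # decoders return (scores, extra outputs)
log_probs = F.log_softmax(net_output[0], dim=-1)
assert torch.allclose(log_probs.exp().sum(-1), torch.ones(2, 4), atol=1e-5)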
data2vec_vision-main
infoxlm/fairseq/fairseq/models/fairseq_decoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import importlib import os from .fairseq_decoder import FairseqDecoder from .fairseq_encoder import FairseqEncoder from .fairseq_incremental_decoder import FairseqIncrementalDecoder from .fairseq_model import ( BaseFairseqModel, FairseqEncoderModel, FairseqEncoderDecoderModel, FairseqLanguageModel, FairseqModel, FairseqMultiModel, ) from .composite_encoder import CompositeEncoder from .distributed_fairseq_model import DistributedFairseqModel MODEL_REGISTRY = {} ARCH_MODEL_REGISTRY = {} ARCH_MODEL_INV_REGISTRY = {} ARCH_CONFIG_REGISTRY = {} __all__ = [ 'BaseFairseqModel', 'CompositeEncoder', 'DistributedFairseqModel', 'FairseqDecoder', 'FairseqEncoder', 'FairseqEncoderDecoderModel', 'FairseqEncoderModel', 'FairseqIncrementalDecoder', 'FairseqLanguageModel', 'FairseqModel', 'FairseqMultiModel', ] def build_model(args, task): return ARCH_MODEL_REGISTRY[args.arch].build_model(args, task) def register_model(name): """ New model types can be added to fairseq with the :func:`register_model` function decorator. For example:: @register_model('lstm') class LSTM(FairseqEncoderDecoderModel): (...) .. note:: All models must implement the :class:`BaseFairseqModel` interface. Typically you will extend :class:`FairseqEncoderDecoderModel` for sequence-to-sequence tasks or :class:`FairseqLanguageModel` for language modeling tasks. Args: name (str): the name of the model """ def register_model_cls(cls): if name in MODEL_REGISTRY: raise ValueError('Cannot register duplicate model ({})'.format(name)) if not issubclass(cls, BaseFairseqModel): raise ValueError('Model ({}: {}) must extend BaseFairseqModel'.format(name, cls.__name__)) MODEL_REGISTRY[name] = cls return cls return register_model_cls def register_model_architecture(model_name, arch_name): """ New model architectures can be added to fairseq with the :func:`register_model_architecture` function decorator. After registration, model architectures can be selected with the ``--arch`` command-line argument. For example:: @register_model_architecture('lstm', 'lstm_luong_wmt_en_de') def lstm_luong_wmt_en_de(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1000) (...) The decorated function should take a single argument *args*, which is a :class:`argparse.Namespace` of arguments parsed from the command-line. The decorated function should modify these arguments in-place to match the desired architecture. 
Args: model_name (str): the name of the Model (Model must already be registered) arch_name (str): the name of the model architecture (``--arch``) """ def register_model_arch_fn(fn): if model_name not in MODEL_REGISTRY: raise ValueError('Cannot register model architecture for unknown model type ({})'.format(model_name)) if arch_name in ARCH_MODEL_REGISTRY: raise ValueError('Cannot register duplicate model architecture ({})'.format(arch_name)) if not callable(fn): raise ValueError('Model architecture must be callable ({})'.format(arch_name)) ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name] ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name) ARCH_CONFIG_REGISTRY[arch_name] = fn return fn return register_model_arch_fn # automatically import any Python files in the models/ directory models_dir = os.path.dirname(__file__) for file in os.listdir(models_dir): path = os.path.join(models_dir, file) if not file.startswith('_') and not file.startswith('.') and (file.endswith('.py') or os.path.isdir(path)): model_name = file[:file.find('.py')] if file.endswith('.py') else file module = importlib.import_module('fairseq.models.' + model_name) # extra `model_parser` for sphinx if model_name in MODEL_REGISTRY: parser = argparse.ArgumentParser(add_help=False) group_archs = parser.add_argument_group('Named architectures') group_archs.add_argument('--arch', choices=ARCH_MODEL_INV_REGISTRY[model_name]) group_args = parser.add_argument_group('Additional command-line arguments') MODEL_REGISTRY[model_name].add_args(group_args) globals()[model_name + '_parser'] = parser
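# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, hypothetical names): registering a model
# and a named architecture with the decorators defined above, mirroring the
# docstring examples.
# ---------------------------------------------------------------------------
from fairseq.models import (
    ARCH_MODEL_REGISTRY,
    BaseFairseqModel,
    MODEL_REGISTRY,
    register_model,
    register_model_architecture,
)


@register_model('toy_model')
class ToyRegisteredModel(BaseFairseqModel):

    @classmethod
    def build_model(cls, args, task):
        return cls()


@register_model_architecture('toy_model', 'toy_model_base')
def toy_model_base(args):
    # set defaults without clobbering values already parsed from the command line
    args.toy_hidden_dim = getattr(args, 'toy_hidden_dim', 8)


assert MODEL_REGISTRY['toy_model'] is ToyRegisteredModel
assert ARCH_MODEL_REGISTRY['toy_model_base'] is ToyRegisteredModel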
data2vec_vision-main
infoxlm/fairseq/fairseq/models/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange # -------------- Helper Functions --------------------------------------------------- # def _skip(x, mask): """ Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors. """ if isinstance(x, int): return x if x is None: return None if isinstance(x, torch.Tensor): if x.size(0) == mask.size(0): return x[mask] elif x.size(1) == mask.size(0): return x[:, mask] if isinstance(x, list): return [_skip(x_i, mask) for x_i in x] if isinstance(x, dict): return {k: _skip(v, mask) for k, v in x.items()} raise NotImplementedError def _skip_encoder_out(encoder, encoder_out, mask): if not mask.any(): return encoder_out else: return encoder.reorder_encoder_out(encoder_out, mask.nonzero().squeeze()) def _fill(x, mask, y, padding_idx): """ Filling tensor x with y at masked positions (dim=0). """ if x is None: return y assert x.dim() == y.dim() and mask.size(0) == x.size(0) assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) n_selected = mask.sum() assert n_selected == y.size(0) if n_selected == x.size(0): return y if x.size(1) < y.size(1): dims = [x.size(0), y.size(1) - x.size(1)] if x.dim() == 3: dims.append(x.size(2)) x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1) x[mask] = y elif x.size(1) > y.size(1): x[mask] = padding_idx if x.dim() == 2: x[mask, :y.size(1)] = y else: x[mask, :y.size(1), :] = y else: x[mask] = y return x def load_libnat(): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat. 
run `pip install --editable .`\n") raise e return libnat def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx): libnat = load_libnat() in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] # generate labels masked_tgt_masks = [] for mask_input in mask_inputs: mask_label = [] for beam_size in mask_input[1:-1]: # HACK 1:-1 mask_label += [0] + [1 for _ in range(beam_size)] masked_tgt_masks.append( mask_label + [0 for _ in range(out_seq_len - len(mask_label))] ) mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor masked_tgt_masks = torch.tensor( masked_tgt_masks, device=out_tokens.device ).bool() mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets def _get_del_targets(in_tokens, out_tokens, padding_idx): libnat = load_libnat() out_seq_len = out_tokens.size(1) with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) word_del_targets = [b[-1] for b in full_labels] word_del_targets = [ labels + [0 for _ in range(out_seq_len - len(labels))] for labels in word_del_targets ] # transform to tensor word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device) return word_del_targets def _get_del_ins_targets(in_tokens, out_tokens, padding_idx): libnat = load_libnat() in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) word_del_targets = [b[-1] for b in full_labels] word_del_targets = [ labels + [0 for _ in range(out_seq_len - len(labels))] for labels in word_del_targets ] mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device) return word_del_targets, mask_ins_targets def _apply_ins_masks( in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx ): in_masks = in_tokens.ne(padding_idx) in_lengths = in_masks.sum(1) # HACK: hacky way to shift all the paddings to eos first. 
in_tokens.masked_fill_(~in_masks, eos_idx) mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0) out_lengths = in_lengths + mask_ins_pred.sum(1) out_max_len = out_lengths.max() out_masks = ( new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None] ) reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1) out_tokens = ( in_tokens.new_zeros(in_tokens.size(0), out_max_len) .fill_(padding_idx) .masked_fill_(out_masks, unk_idx) ) out_tokens[:, 0] = in_tokens[:, 0] out_tokens.scatter_(1, reordering, in_tokens[:, 1:]) out_scores = None if in_scores is not None: in_scores.masked_fill_(~in_masks, 0) out_scores = in_scores.new_zeros(*out_tokens.size()) out_scores[:, 0] = in_scores[:, 0] out_scores.scatter_(1, reordering, in_scores[:, 1:]) return out_tokens, out_scores def _apply_ins_words( in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx ): word_ins_masks = in_tokens.eq(unk_idx) out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks]) if in_scores is not None: out_scores = in_scores.masked_scatter( word_ins_masks, word_ins_scores[word_ins_masks] ) else: out_scores = None return out_tokens, out_scores def _apply_del_words( in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx ): # apply deletion to a tensor in_masks = in_tokens.ne(padding_idx) bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx) max_len = in_tokens.size(1) word_del_pred.masked_fill_(~in_masks, 1) word_del_pred.masked_fill_(bos_eos_masks, 0) reordering = ( new_arange(in_tokens) .masked_fill_(word_del_pred, max_len) .sort(1)[1] ) out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering) out_scores = None if in_scores is not None: out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering) out_attn = None if in_attn is not None: _mask = word_del_pred[:, :, None].expand_as(in_attn) _reordering = reordering[:, :, None].expand_as(in_attn) out_attn = in_attn.masked_fill(_mask, 0.).gather(1, _reordering) return out_tokens, out_scores, out_attn # ------------------------------------------------------------------------------------- # @register_model("levenshtein_transformer") class LevenshteinTransformerModel(TransformerModel): def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) self.tgt_dict = decoder.dictionary self.bos = decoder.dictionary.bos() self.eos = decoder.dictionary.eos() self.pad = decoder.dictionary.pad() self.unk = decoder.dictionary.unk() @staticmethod def add_args(parser): TransformerModel.add_args(parser) parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) parser.add_argument( "--early-exit", default="6,6,6", type=str, help="number of decoder layers before word_del, mask_ins, word_ins", ) parser.add_argument( "--no-share-discriminator", action="store_true", help="separate parameters for discriminator", ) parser.add_argument( "--no-share-maskpredictor", action="store_true", help="separate parameters for mask-predictor", ) parser.add_argument( "--share-discriminator-maskpredictor", action="store_true", help="share the parameters for both mask-predictor and discriminator", ) parser.add_argument( "--sampling-for-deletion", action='store_true', help='instead of argmax, use sampling to predict the tokens' ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): 
decoder.apply(init_bert_params) return decoder @classmethod def build_encoder(cls, args, src_dict, embed_tokens): encoder = TransformerEncoder(args, src_dict, embed_tokens) if getattr(args, "apply_bert_init", False): encoder.apply(init_bert_params) return encoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk ) mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) mask_ins_out, _ = self.decoder.forward_mask_ins( prev_output_tokens, encoder_out=encoder_out ) word_ins_out, _ = self.decoder.forward_word_ins( masked_tgt_tokens, encoder_out=encoder_out ) # make online prediction if self.decoder.sampling_for_deletion: word_predictions = torch.multinomial( F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1).view( word_ins_out.size(0), -1) else: word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1] word_predictions.masked_scatter_( ~masked_tgt_masks, tgt_tokens[~masked_tgt_masks] ) # generate training labels for deletion word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad) word_del_out, _ = self.decoder.forward_word_del( word_predictions, encoder_out) word_del_masks = word_predictions.ne(self.pad) return { "mask_ins": { "out": mask_ins_out, "tgt": mask_ins_targets, "mask": mask_ins_masks, "ls": 0.01, }, "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": masked_tgt_masks, "ls": self.args.label_smoothing, "nll_loss": True }, "word_del": { "out": word_del_out, "tgt": word_del_targets, "mask": word_del_masks } } def forward_encoder(self, encoder_inputs): return self.encoder(*encoder_inputs) def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn history = decoder_out.history bsz = output_tokens.size(0) if max_ratio is None: max_lens = torch.zeros_like(output_tokens).fill_(255) else: if encoder_out.encoder_padding_mask is None: max_src_len = encoder_out.encoder_out.size(1) src_lens = encoder_out.encoder_out.new(bsz).fill_(max_src_len) else: src_lens = (~encoder_out.encoder_padding_mask).sum(1) max_lens = (src_lens * max_ratio).clamp(min=10).long() # delete words # do not delete tokens if it is <s> </s> can_del_word = output_tokens.ne(self.pad).sum(1) > 2 if can_del_word.sum() != 0: # we cannot delete, skip word_del_out, word_del_attn = self.decoder.forward_word_del( _skip(output_tokens, can_del_word), _skip_encoder_out(self.encoder, encoder_out, can_del_word) ) word_del_score = F.log_softmax(word_del_out, 2) word_del_pred = word_del_score.max(-1)[1].bool() _tokens, _scores, _attn = _apply_del_words( output_tokens[can_del_word], output_scores[can_del_word], word_del_attn, word_del_pred, self.pad, self.bos, self.eos, ) output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) output_scores = _fill(output_scores, can_del_word, _scores, 0) attn = _fill(attn, can_del_word, _attn, 0.) 
if history is not None: history.append(output_tokens.clone()) # insert placeholders can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens if can_ins_mask.sum() != 0: mask_ins_out, _ = self.decoder.forward_mask_ins( _skip(output_tokens, can_ins_mask), _skip_encoder_out(self.encoder, encoder_out, can_ins_mask) ) mask_ins_score = F.log_softmax(mask_ins_out, 2) if eos_penalty > 0.0: mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty mask_ins_pred = mask_ins_score.max(-1)[1] mask_ins_pred = torch.min( mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred) ) _tokens, _scores = _apply_ins_masks( output_tokens[can_ins_mask], output_scores[can_ins_mask], mask_ins_pred, self.pad, self.unk, self.eos, ) output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_mask, _scores, 0) if history is not None: history.append(output_tokens.clone()) # insert words can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 if can_ins_word.sum() != 0: word_ins_out, word_ins_attn = self.decoder.forward_word_ins( _skip(output_tokens, can_ins_word), _skip_encoder_out(self.encoder, encoder_out, can_ins_word) ) word_ins_score, word_ins_pred = F.log_softmax(word_ins_out, 2).max(-1) _tokens, _scores = _apply_ins_words( output_tokens[can_ins_word], output_scores[can_ins_word], word_ins_pred, word_ins_score, self.unk, ) output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_word, _scores, 0) attn = _fill(attn, can_ins_word, word_ins_attn, 0.) if history is not None: history.append(output_tokens.clone()) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] attn = None if attn is None else attn[:, :cut_off, :] return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, history=history ) def initialize_output_tokens(self, encoder_out, src_tokens): initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2) initial_output_tokens[:, 0] = self.bos initial_output_tokens[:, 1] = self.eos initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(encoder_out.encoder_out) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=None, step=0, max_step=0, history=None ) class LevenshteinTransformerDecoder(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None) self.embed_word_del = Embedding(2, self.output_embed_dim, None) # del_word, ins_mask, ins_word self.early_exit = [int(i) for i in args.early_exit.split(',')] assert len(self.early_exit) == 3 # copy layers for mask-predict/deletion self.layers_msk = None if getattr(args, "no_share_maskpredictor", False): self.layers_msk = nn.ModuleList([ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[1]) ]) self.layers_del = None if getattr(args, "no_share_discriminator", False): self.layers_del = nn.ModuleList([ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[0]) ]) if 
getattr(args, "share_discriminator_maskpredictor", False): assert getattr(args, "no_share_discriminator", False), "must set saperate discriminator" self.layers_msk = self.layers_del def extract_features( self, prev_output_tokens, encoder_out=None, early_exit=None, layers=None, **unused ): """ Similar to *forward* but only return features. Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) layers = self.layers if layers is None else layers early_exit = len(layers) if early_exit is None else early_exit for _, layer in enumerate(layers[: early_exit]): x, attn = layer( x, encoder_out.encoder_out if encoder_out is not None else None, encoder_out.encoder_padding_mask if encoder_out is not None else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": attn, "inner_states": inner_states} def forward_mask_ins(self, prev_output_tokens, encoder_out=None, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[1], layers=self.layers_msk, **unused ) features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) return F.linear(features_cat, self.embed_mask_ins.weight), extra['attn'] def forward_word_ins(self, prev_output_tokens, encoder_out=None, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[2], layers=self.layers, **unused ) return self.output_layer(features), extra['attn'] def forward_word_del(self, prev_output_tokens, encoder_out=None, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[0], layers=self.layers_del, **unused ) return F.linear(features, self.embed_word_del.weight), extra['attn'] @register_model_architecture("levenshtein_transformer", "levenshtein_transformer") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, 
"decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.early_exit = getattr(args, "early_exit", "6,6,6") args.no_share_discriminator = getattr(args, "no_share_discriminator", False) args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False) args.share_discriminator_maskpredictor = getattr(args, "share_discriminator_maskpredictor", False) args.no_share_last_layer = getattr(args, "no_share_last_layer", False) @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_wmt_en_de" ) def levenshtein_transformer_wmt_en_de(args): base_architecture(args) # similar parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_vaswani_wmt_en_de_big" ) def levenshtein_transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) # default parameters used in tensor2tensor implementation @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_wmt_en_de_big" ) def levenshtein_transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) levenshtein_transformer_vaswani_wmt_en_de_big(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/levenshtein_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from typing import Any, Dict from fairseq import checkpoint_utils from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, TransformerModel, base_architecture as transformer_base_architecture, ) @register_model("transformer_from_pretrained_xlm") class TransformerFromPretrainedXLMModel(TransformerModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" TransformerModel.add_args(parser) parser.add_argument( "--pretrained-xlm-checkpoint", type=str, metavar="STR", help="XLM model to use for initializing transformer encoder and/or decoder", ) parser.add_argument( "--init-encoder-only", action="store_true", help="if set, don't load the XLM weights and embeddings into decoder", ) parser.add_argument( "--init-decoder-only", action="store_true", help="if set, don't load the XLM weights and embeddings into encoder", ) @classmethod def build_model(self, args, task, cls_dictionary=MaskedLMDictionary): assert hasattr(args, "pretrained_xlm_checkpoint"), ( "You must specify a path for --pretrained-xlm-checkpoint to use " "--arch transformer_from_pretrained_xlm" ) assert isinstance(task.source_dictionary, cls_dictionary) and isinstance( task.target_dictionary, cls_dictionary ), ( "You should use a MaskedLMDictionary when using --arch " "transformer_from_pretrained_xlm because the pretrained XLM model " "was trained using data binarized with MaskedLMDictionary. " "For translation, you may want to use --task " "translation_from_pretrained_xlm" ) assert not ( getattr(args, "init_encoder_only", False) and getattr(args, "init_decoder_only", False) ), "Only one of --init-encoder-only and --init-decoder-only can be set." return super().build_model(args, task) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderFromPretrainedXLM(args, tgt_dict, embed_tokens) def upgrade_state_dict_with_xlm_weights( state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str ) -> Dict[str, Any]: """ Load XLM weights into a Transformer encoder or decoder model. Args: state_dict: state dict for either TransformerEncoder or TransformerDecoder pretrained_xlm_checkpoint: checkpoint to load XLM weights from Raises: AssertionError: If architecture (num layers, attention heads, etc.) does not match between the current Transformer encoder or decoder and the pretrained_xlm_checkpoint """ if not os.path.exists(pretrained_xlm_checkpoint): raise IOError("Model file not found: {}".format(pretrained_xlm_checkpoint)) state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint) xlm_state_dict = state["model"] for key in xlm_state_dict.keys(): for search_key in ["embed_tokens", "embed_positions", "layers"]: if search_key in key: subkey = key[key.find(search_key):] assert subkey in state_dict, ( "{} Transformer encoder / decoder " "state_dict does not contain {}. 
Cannot " "load {} from pretrained XLM checkpoint " "{} into Transformer.".format( str(state_dict.keys()), subkey, key, pretrained_xlm_checkpoint) ) state_dict[subkey] = xlm_state_dict[key] return state_dict class TransformerEncoderFromPretrainedXLM(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) if getattr(args, 'init_decoder_only', False): # Don't load XLM weights for encoder if --init-decoder-only return assert hasattr(args, "pretrained_xlm_checkpoint"), ( "--pretrained-xlm-checkpoint must be specified to load Transformer " "encoder from pretrained XLM" ) xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights( state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint, ) self.load_state_dict(xlm_loaded_state_dict, strict=True) class TransformerDecoderFromPretrainedXLM(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn) if getattr(args, 'init_encoder_only', False): # Don't load XLM weights for decoder if --init-encoder-only return assert hasattr(args, "pretrained_xlm_checkpoint"), ( "--pretrained-xlm-checkpoint must be specified to load Transformer " "decoder from pretrained XLM" ) xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights( state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint, ) self.load_state_dict(xlm_loaded_state_dict, strict=True) @register_model_architecture( "transformer_from_pretrained_xlm", "transformer_from_pretrained_xlm" ) def base_architecture(args): transformer_base_architecture(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/transformer_from_pretrained_xlm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq.models.levenshtein_transformer import ( _fill, _skip, _skip_encoder_out, _apply_ins_masks, _apply_ins_words, _apply_del_words, ) class _EnsembleModelEncoder(object): def __init__(self, models): self.models = models def reorder_encoder_out(self, encoder_outs, new_order): encoder_outs = [ model.encoder.reorder_encoder_out(encoder_out, new_order) for model, encoder_out in zip(self.models, encoder_outs) ] return encoder_outs class BasicEnsembleModel(torch.nn.Module): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__() self.models = torch.nn.ModuleList(models) self.bos = self.models[0].decoder.dictionary.bos() self.eos = self.models[0].decoder.dictionary.eos() self.pad = self.models[0].decoder.dictionary.pad() self.unk = self.models[0].decoder.dictionary.unk() self.encoder = _EnsembleModelEncoder(self.models) def has_encoder(self): return hasattr(self.models[0], 'encoder') def max_decoder_positions(self): return min(m.max_decoder_positions() for m in self.models) @torch.no_grad() def forward_encoder(self, encoder_input): if not self.has_encoder(): return None return [model.forward_encoder(encoder_input) for model in self.models] @torch.no_grad() def forward_decoder(self, *inputs): raise NotImplementedError def initialize_output_tokens(self, *inputs): raise NotImplementedError class EnsembleLevT(BasicEnsembleModel): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__(models) @torch.no_grad() def forward_decoder(self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs): # LevT ensembling # A pipeline of three steps: deletion, placeholder, and word insertion. # We need to average scores in each step in a pipeline way because of dependence. 
# deletion output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn bsz = output_tokens.size(0) if max_ratio is None: max_lens = output_tokens.new().fill_(255) else: if encoder_outs[0].encoder_padding_mask is None: src_lens = encoder_outs[0].encoder_out.new(bsz).fill_(encoder_outs[0].encoder_out.size(1)) else: src_lens = (~encoder_outs[0].encoder_padding_mask).sum(1) max_lens = (src_lens * max_ratio).clamp(min=10).long() # delete words # do not delete tokens if it is <s> </s> can_del_word = output_tokens.ne(self.pad).sum(1) > 2 if can_del_word.sum() != 0: # we cannot delete, skip output_tokens, output_scores, attn = self.forward_word_del( encoder_outs, output_tokens, output_scores, attn, can_del_word, ) # insert placeholders can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens if can_ins_mask.sum() != 0: output_tokens, output_scores = self.forward_mask_ins( encoder_outs, output_tokens, output_scores, can_ins_mask, eos_penalty, max_lens, ) # insert words can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 if can_ins_word.sum() != 0: output_tokens, output_scores, attn = self.forward_word_ins( encoder_outs, output_tokens, output_scores, attn, can_ins_word, ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] attn = None if attn is None else attn[:, :cut_off, :] return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, history=None ) def forward_word_del(self, encoder_outs, output_tokens, output_scores, attn, can_del_word): word_del_score_avg = [] word_del_attn_avg = [] for model, encoder_out in zip(self.models, encoder_outs): word_del_out, word_del_attn = model.decoder.forward_word_del( _skip(output_tokens, can_del_word), _skip_encoder_out(model.encoder, encoder_out, can_del_word), ) word_del_score = F.log_softmax(word_del_out, 2) word_del_score_avg.append(word_del_score) word_del_attn_avg.append(word_del_attn) word_del_score_avg = torch.logsumexp(torch.stack(word_del_score_avg, dim=0), dim=0) - math.log(len(self.models)) word_del_pred = word_del_score_avg.max(-1)[1].bool() if word_del_attn_avg[0] is not None: word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0)/len(self.models) else: word_del_attn_avg = None _tokens, _scores, _attn = _apply_del_words( output_tokens[can_del_word], output_scores[can_del_word], word_del_attn_avg, word_del_pred, self.pad, self.bos, self.eos, ) output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) output_scores = _fill(output_scores, can_del_word, _scores, 0) attn = _fill(attn, can_del_word, _attn, 0.) 
return output_tokens, output_scores, attn def forward_mask_ins(self, encoder_outs, output_tokens, output_scores, can_ins_mask, eos_penalty, max_lens): mask_ins_score_avg = [] for model, encoder_out in zip(self.models, encoder_outs): mask_ins_out, _ = model.decoder.forward_mask_ins( _skip(output_tokens, can_ins_mask), _skip_encoder_out(model.encoder, encoder_out, can_ins_mask), ) mask_ins_score = F.log_softmax(mask_ins_out, 2) if eos_penalty > 0.0: mask_ins_score[:, :, 0] -= eos_penalty mask_ins_score_avg.append(mask_ins_score) mask_ins_score_avg = torch.logsumexp(torch.stack(mask_ins_score_avg, dim=0), dim=0) - math.log(len(self.models)) mask_ins_pred = mask_ins_score_avg.max(-1)[1] mask_ins_pred = torch.min( mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred) ) _tokens, _scores = _apply_ins_masks( output_tokens[can_ins_mask], output_scores[can_ins_mask], mask_ins_pred, self.pad, self.unk, self.eos, ) output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_mask, _scores, 0) return output_tokens, output_scores def forward_word_ins(self, encoder_outs, output_tokens, output_scores, attn, can_ins_word): word_ins_score_avg = [] word_ins_attn_avg = [] for model, encoder_out in zip(self.models, encoder_outs): word_ins_out, word_ins_attn = model.decoder.forward_word_ins( _skip(output_tokens, can_ins_word), _skip_encoder_out(model.encoder, encoder_out, can_ins_word), ) word_ins_score = F.log_softmax(word_ins_out, 2) word_ins_score_avg.append(word_ins_score) word_ins_attn_avg.append(word_ins_attn) word_ins_score_avg = torch.logsumexp(torch.stack(word_ins_score_avg, dim=0), dim=0) - math.log(len(self.models)) if word_ins_attn_avg[0] is not None: word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0)/len(self.models) else: word_ins_attn_avg = None word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1) _tokens, _scores = _apply_ins_words( output_tokens[can_ins_word], output_scores[can_ins_word], word_ins_pred, word_ins_score_max, self.unk, ) output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_word, _scores, 0) attn = _fill(attn, can_ins_word, word_ins_attn, 0.) return output_tokens, output_scores, attn def initialize_output_tokens(self, encoder_outs, src_tokens): # LevT doesn't do length prediction. return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens)
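# Illustrative sketch, not from the original fairseq source: how the ensemble above
# combines per-model scores. Each decoder head returns log-probabilities, and averaging
# the underlying probabilities is done in log space as logsumexp(...) - log(K), the
# same expression used in forward_word_del / forward_mask_ins / forward_word_ins.
import math

import torch


def average_log_probs(log_prob_list):
    # log( (1/K) * sum_k exp(log_p_k) ), computed stably over K stacked models
    stacked = torch.stack(log_prob_list, dim=0)
    return torch.logsumexp(stacked, dim=0) - math.log(len(log_prob_list))


if __name__ == "__main__":
    torch.manual_seed(0)
    log_p_a = torch.log_softmax(torch.randn(2, 5), dim=-1)
    log_p_b = torch.log_softmax(torch.randn(2, 5), dim=-1)
    avg = average_log_probs([log_p_a, log_p_b])
    # sanity check against averaging directly in probability space
    assert torch.allclose(avg, ((log_p_a.exp() + log_p_b.exp()) / 2).log(), atol=1e-6)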
data2vec_vision-main
infoxlm/fairseq/fairseq/models/nonautoregressive_ensembles.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params def _mean_pooling(enc_feats, src_masks): # enc_feats: T x B x C # src_masks: B x T or None if src_masks is None: enc_feats = enc_feats.mean(0) else: src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats) enc_feats = ( (enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None] ).sum(0) return enc_feats def _argmax(x, dim): return (x == x.max(dim, keepdim=True)[0]).type_as(x) def _uniform_assignment(src_lens, trg_lens): max_trg_len = trg_lens.max() steps = (src_lens.float() - 1) / (trg_lens.float() - 1) # step-size # max_trg_len index_t = utils.new_arange(trg_lens, max_trg_len).float() index_t = steps[:, None] * index_t[None, :] # batch_size X max_trg_len index_t = torch.round(index_t).long().detach() return index_t @register_model("nonautoregressive_transformer") class NATransformerModel(TransformerModel): def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) self.tgt_dict = decoder.dictionary self.bos = decoder.dictionary.bos() self.eos = decoder.dictionary.eos() self.pad = decoder.dictionary.pad() self.unk = decoder.dictionary.unk() @staticmethod def add_args(parser): TransformerModel.add_args(parser) parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) # length prediction parser.add_argument("--src-embedding-copy", action="store_true", help="copy encoder word embeddings as the initial input of the decoder") parser.add_argument("--pred-length-offset", action="store_true", help="predicting the length difference between the target and source sentences") parser.add_argument("--sg-length-pred", action="store_true", help="stop the gradients back-propagated from the length predictor") parser.add_argument("--length-loss-factor", type=float, help="weights on the length prediction loss") @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = NATransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder @classmethod def build_encoder(cls, args, src_dict, embed_tokens): encoder = TransformerEncoder(args, src_dict, embed_tokens) if getattr(args, "apply_bert_init", False): encoder.apply(init_bert_params) return encoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) length_out, length_tgt = self.decoder.forward_length_prediction( encoder_out, tgt_tokens ) word_ins_out, word_ins_tgt, word_ins_mask = self.decoder( prev_output_tokens, encoder_out=encoder_out, tgt_tokens=tgt_tokens ) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor } } def forward_encoder(self, encoder_inputs): return self.encoder(*encoder_inputs) def forward_decoder(self, 
decoder_out, encoder_out, decoding_format=None, **kwargs): step = decoder_out.step output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # execute the decoder output_masks = output_tokens.ne(self.pad) _scores, _tokens = self.decoder( output_tokens, encoder_out=encoder_out, decoding_format=decoding_format, step=step, ) output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) output_scores.masked_scatter_(output_masks, _scores[output_masks]) if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history ) def initialize_output_tokens(self, encoder_out, src_tokens): # length prediction _, length_tgt = self.decoder.forward_length_prediction(encoder_out) max_length = length_tgt.clamp_(min=2).max() idx_length = utils.new_arange(src_tokens, max_length) initial_output_tokens = src_tokens.new_zeros( src_tokens.size(0), max_length ).fill_(self.pad) initial_output_tokens.masked_fill_( idx_length[None, :] < length_tgt[:, None], self.unk ) initial_output_tokens[:, 0] = self.bos initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(encoder_out.encoder_out) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=None, step=0, max_step=0, history=None ) class NATransformerDecoder(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.encoder_embed_dim = args.encoder_embed_dim self.sg_length_pred = getattr(args, "sg_length_pred", False) self.pred_length_offset = getattr(args, "pred_length_offset", False) self.length_loss_factor = getattr(args, "length_loss_factor", 0.1) self.src_embedding_copy = getattr(args, "src_embedding_copy", False) self.embed_length = Embedding(256, self.encoder_embed_dim, None) def forward( self, prev_output_tokens, encoder_out=None, tgt_tokens=None, decoding_format=None, step=0, **kwargs ): features, _ = self.extract_features( prev_output_tokens, encoder_out=encoder_out, embedding_copy=(step == 0) & self.src_embedding_copy, ) if tgt_tokens is not None: word_ins_mask = tgt_tokens.ne(self.padding_idx) word_ins_tgt = tgt_tokens return self.output_layer(features), word_ins_tgt, word_ins_mask else: return F.log_softmax(self.output_layer(features), -1).max(-1) def extract_features( self, prev_output_tokens, encoder_out=None, early_exit=None, embedding_copy=False, **unused ): """ Similar to *forward* but only return features. 
Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embedding if embedding_copy: src_embd = encoder_out.encoder_embedding src_mask = encoder_out.encoder_padding_mask src_mask = ( ~src_mask if src_mask is not None else prev_output_tokens.new_ones(*src_embd.size()[:2]).bool() ) x, decoder_padding_mask = self.forward_embedding( prev_output_tokens, self.forward_copying_source( src_embd, src_mask, prev_output_tokens.ne(self.padding_idx) ), ) else: x, decoder_padding_mask = self.forward_embedding(prev_output_tokens) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for i, layer in enumerate(self.layers): # early exit from the decoder. if (early_exit is not None) and (i >= early_exit): break x, attn = layer( x, encoder_out.encoder_out if encoder_out is not None else None, encoder_out.encoder_padding_mask if encoder_out is not None else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": attn, "inner_states": inner_states} def forward_embedding(self, prev_output_tokens, states=None): # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions if states is None: x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) else: x = states if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) return x, decoder_padding_mask def forward_copying_source(self, src_embeds, src_masks, tgt_masks): length_sources = src_masks.sum(1) length_targets = tgt_masks.sum(1) mapped_inputs = _uniform_assignment(length_sources, length_targets).masked_fill( ~tgt_masks, 0 ) copied_embedding = torch.gather( src_embeds, 1, mapped_inputs.unsqueeze(-1).expand( *mapped_inputs.size(), src_embeds.size(-1) ), ) return copied_embedding def forward_length_prediction(self, encoder_out, tgt_tokens=None): enc_feats = encoder_out.encoder_out # T x B x C src_masks = encoder_out.encoder_padding_mask # B x T or None if self.pred_length_offset: if src_masks is None: src_lengs = enc_feats.new_ones(enc_feats.size(1)).fill_( enc_feats.size(0) ) else: src_lengs = (~src_masks).transpose(0, 1).type_as(enc_feats).sum(0) src_lengs = src_lengs.long() enc_feats = _mean_pooling(enc_feats, src_masks) if self.sg_length_pred: enc_feats = enc_feats.detach() length_out = F.linear(enc_feats, self.embed_length.weight) if tgt_tokens is not None: # obtain the length target tgt_lengs = tgt_tokens.ne(self.padding_idx).sum(1).long() if self.pred_length_offset: length_tgt = tgt_lengs - src_lengs + 128 else: length_tgt = tgt_lengs length_tgt = length_tgt.clamp(min=0, max=255) else: # predict the length target (greedy for now) # TODO: implementing length-beam pred_lengs = length_out.max(-1)[1] if self.pred_length_offset: length_tgt = pred_lengs - 128 + src_lengs else: length_tgt = pred_lengs return length_out, length_tgt @register_model_architecture( "nonautoregressive_transformer", 
"nonautoregressive_transformer" ) def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) @register_model_architecture( "nonautoregressive_transformer", "nonautoregressive_transformer_wmt_en_de" ) def nonautoregressive_transformer_wmt_en_de(args): base_architecture(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/nonautoregressive_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import inspect

import torch.nn as nn

from fairseq.legacy_distributed_data_parallel import LegacyDistributedDataParallel
from fairseq.models import BaseFairseqModel


def DistributedFairseqModel(args, model):
    """
    Wrap a *model* to support distributed data parallel training.

    This is similar to the built-in DistributedDataParallel, but allows additional
    configuration of the DistributedDataParallel class to use, and also provides
    easier access to the wrapped model by forwarding requests for missing attributes
    to the wrapped model.

    Args:
        args (argparse.Namespace): fairseq args
        model (BaseFairseqModel): model to wrap
    """
    # determine which DDP class to extend
    assert isinstance(model, nn.Module)
    if args.ddp_backend == 'c10d':
        ddp_class = nn.parallel.DistributedDataParallel
        init_kwargs = dict(
            module=model,
            device_ids=[args.device_id],
            output_device=args.device_id,
            broadcast_buffers=False,
            bucket_cap_mb=args.bucket_cap_mb,
        )
        # Maintain backward compatibility
        if 'check_reduction' in inspect.getargspec(ddp_class)[0]:
            init_kwargs['check_reduction'] = True
        if 'find_unused_parameters' in inspect.getargspec(ddp_class)[0]:
            init_kwargs['find_unused_parameters'] = args.find_unused_parameters
    elif args.ddp_backend == 'no_c10d':
        ddp_class = LegacyDistributedDataParallel
        init_kwargs = dict(
            module=model,
            world_size=args.distributed_world_size,
            buffer_size=2**28,
        )
    else:
        raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend)

    class _DistributedFairseqModel(ddp_class):
        """Extend DistributedDataParallel to check for missing attributes in the
        wrapped module."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def __getattr__(self, name):
            wrapped_module = super().__getattr__('module')
            if hasattr(wrapped_module, name):
                return getattr(wrapped_module, name)
            return super().__getattr__(name)

    return _DistributedFairseqModel(**init_kwargs)
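# Illustrative sketch, not from the original fairseq source: the attribute-forwarding
# trick used by `_DistributedFairseqModel`, shown with a plain wrapper instead of a
# real DDP class so it runs in a single process. `ForwardingWrapper` and `ToyModel`
# are invented for this example only.
import torch.nn as nn


class ForwardingWrapper(nn.Module):
    def __init__(self, module):
        super().__init__()
        self.module = module

    def __getattr__(self, name):
        # __getattr__ only runs when normal lookup fails; fetch the wrapped module via
        # the parent implementation (it lives in self._modules), then fall back to it
        # for any attribute the wrapper itself does not define.
        wrapped_module = super().__getattr__('module')
        if hasattr(wrapped_module, name):
            return getattr(wrapped_module, name)
        return super().__getattr__(name)


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(2, 2)

    def custom_method(self):
        return "hello from the wrapped model"


wrapped = ForwardingWrapper(ToyModel())
assert wrapped.custom_method() == "hello from the wrapped model"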
data2vec_vision-main
infoxlm/fairseq/fairseq/models/distributed_fairseq_model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This file implements: Ghazvininejad, Marjan, et al. "Constant-time machine translation with conditional masked language models." arXiv preprint arXiv:1904.09324 (2019). """ from fairseq.models import register_model, register_model_architecture from fairseq.models.nonautoregressive_transformer import NATransformerModel from fairseq.utils import new_arange def _skeptical_unmasking(output_scores, output_masks, p): sorted_index = output_scores.sort(-1)[1] boundary_len = ( (output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p ).long() skeptical_mask = new_arange(output_masks) < boundary_len return skeptical_mask.scatter(1, sorted_index, skeptical_mask) @register_model("cmlm_transformer") class CMLMNATransformerModel(NATransformerModel): @staticmethod def add_args(parser): NATransformerModel.add_args(parser) def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert not self.decoder.src_embedding_copy, "do not support embedding copy." encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) length_out, length_tgt = self.decoder.forward_length_prediction( encoder_out, tgt_tokens ) word_ins_out, word_ins_tgt, _ = self.decoder( prev_output_tokens, encoder_out=encoder_out, tgt_tokens=tgt_tokens ) word_ins_mask = prev_output_tokens.eq(self.unk) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor } } def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): step = decoder_out.step max_step = decoder_out.max_step output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # execute the decoder output_masks = output_tokens.eq(self.unk) _scores, _tokens = self.decoder( output_tokens, encoder_out=encoder_out, decoding_format=decoding_format ) output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) output_scores.masked_scatter_(output_masks, _scores[output_masks]) if history is not None: history.append(output_tokens.clone()) # skeptical decoding (depend on the maximum decoding steps.) 
if (step + 1) < max_step: skeptical_mask = _skeptical_unmasking( output_scores, output_tokens.ne(self.pad), 1 - (step + 1) / max_step ) output_tokens.masked_fill_(skeptical_mask, self.unk) output_scores.masked_fill_(skeptical_mask, 0.0) if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history ) @register_model_architecture("cmlm_transformer", "cmlm_transformer") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", True) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.ngram_predictor = getattr(args, "ngram_predictor", 1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) @register_model_architecture("cmlm_transformer", "cmlm_transformer_wmt_en_de") def iter_nat_wmt_en_de(args): base_architecture(args)
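# Illustrative sketch, not from the original fairseq source: what `_skeptical_unmasking`
# does during mask-predict decoding. Given per-token confidence scores, the lowest
# scoring fraction p of tokens (with the sentence length minus the 2 boundary tokens
# setting the budget) is re-masked so the next iteration can re-predict them.
# Toy numbers only, plain PyTorch.
import torch


def toy_skeptical_unmasking(output_scores, output_masks, p):
    sorted_index = output_scores.sort(-1)[1]                 # ascending: least confident first
    boundary_len = ((output_masks.sum(1, keepdim=True).float() - 2) * p).long()
    arange = torch.arange(output_masks.size(1)).expand_as(output_masks)
    skeptical_mask = arange < boundary_len                   # flag the first boundary_len slots
    return skeptical_mask.scatter(1, sorted_index, skeptical_mask)


if __name__ == "__main__":
    scores = torch.tensor([[0.9, 0.1, 0.5, 0.2, 0.8]])       # 5 non-pad tokens
    masks = torch.ones(1, 5, dtype=torch.bool)
    print(toy_skeptical_unmasking(scores, masks, p=2 / 3))
    # -> tensor([[False,  True, False,  True, False]]): the two least confident
    #    positions (indices 1 and 3) come back masked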
data2vec_vision-main
infoxlm/fairseq/fairseq/models/cmlm_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import namedtuple import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, TransformerDecoderLayer, TransformerEncoderLayer, ) import random DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ @classmethod def hub_models(cls): # fmt: off def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } def moses_fastbpe(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'fastbpe', } return { 'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'), 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2', 'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'), 'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'), 'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'), 'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'), 'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'), 'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'), 'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'), } # fmt: on def __init__(self, args, encoder, decoder): super().__init__(encoder, decoder) self.args = args self.supports_align_args = True @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') 
parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019) parser.add_argument('--no-cross-attention', default=False, action='store_true', help='do not perform cross-attention') parser.add_argument('--cross-self-attention', default=False, action='store_true', help='perform cross+self-attention') parser.add_argument('--layer-wise-attention', default=False, action='store_true', help='perform layer-wise attention (cross-attention or cross+self-attention)') # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for encoder') parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for decoder') parser.add_argument('--encoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list') parser.add_argument('--decoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list') parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding') parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, 'max_source_positions', None) is None: args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if getattr(args, 'max_target_positions', None) is None: args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return cls(args, encoder, decoder) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return 
TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder( args, tgt_dict, embed_tokens, no_encoder_attn=getattr(args, 'no_cross_attention', False), ) @register_model('transformer_align') class TransformerAlignModel(TransformerModel): """ See "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). """ def __init__(self, encoder, decoder, args): super().__init__(args, encoder, decoder) self.alignment_heads = args.alignment_heads self.alignment_layer = args.alignment_layer self.full_context_alignment = args.full_context_alignment @staticmethod def add_args(parser): # fmt: off super(TransformerAlignModel, TransformerAlignModel).add_args(parser) parser.add_argument('--alignment-heads', type=int, metavar='D', help='Number of cross attention heads per layer to supervised with alignments') parser.add_argument('--alignment-layer', type=int, metavar='D', help='Layer number which has to be supervised. 0 corresponding to the bottommost layer.') parser.add_argument('--full-context-alignment', type=bool, metavar='D', help='Whether or not alignment is supervised conditioned on the full target context.') # fmt: on @classmethod def build_model(cls, args, task): # set any default arguments transformer_align(args) transformer_model = TransformerModel.build_model(args, task) return TransformerAlignModel(transformer_model.encoder, transformer_model.decoder, args) def forward(self, src_tokens, src_lengths, prev_output_tokens): encoder_out = self.encoder(src_tokens, src_lengths) return self.forward_decoder(prev_output_tokens, encoder_out) def forward_decoder( self, prev_output_tokens, encoder_out=None, incremental_state=None, features_only=False, **extra_args, ): attn_args = {'alignment_layer': self.alignment_layer, 'alignment_heads': self.alignment_heads} decoder_out = self.decoder( prev_output_tokens, encoder_out, **attn_args, **extra_args, ) if self.full_context_alignment: attn_args['full_context_alignment'] = self.full_context_alignment _, alignment_out = self.decoder( prev_output_tokens, encoder_out, features_only=True, **attn_args, **extra_args, ) decoder_out[1]['attn'] = alignment_out['attn'] return decoder_out EncoderOut = namedtuple('TransformerEncoderOut', [ 'encoder_out', # T x B x C 'encoder_padding_mask', # B x T 'encoder_embedding', # B x T x C 'encoder_states', # List[T x B x C] ]) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.encoder_layerdrop = args.encoder_layerdrop embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layer_wise_attention = getattr(args, 'layer_wise_attention', False) self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None if getattr(args, 'layernorm_embedding', False): self.layernorm_embedding = LayerNorm(embed_dim) else: self.layernorm_embedding = None def forward_embedding(self, src_tokens): # embed tokens and positions x = embed = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x = embed + self.embed_positions(src_tokens) if self.layernorm_embedding: x = self.layernorm_embedding(x) x = F.dropout(x, p=self.dropout, training=self.training) return x, embed def forward(self, src_tokens, src_lengths, cls_input=None, return_all_hiddens=False, **unused): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). Returns: namedtuple: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - **encoder_embedding** (Tensor): the (scaled) embedding lookup of shape `(batch, src_len, embed_dim)` - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *return_all_hiddens* is True. """ if self.layer_wise_attention: return_all_hiddens = True x, encoder_embedding = self.forward_embedding(src_tokens) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None encoder_states = [] if return_all_hiddens else None # encoder layers for layer in self.layers: # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not self.training or (dropout_probability > self.encoder_layerdrop): x = layer(x, encoder_padding_mask) if return_all_hiddens: encoder_states.append(x) if self.layer_norm: x = self.layer_norm(x) if return_all_hiddens: encoder_states[-1] = x return EncoderOut( encoder_out=x, # T x B x C encoder_padding_mask=encoder_padding_mask, # B x T encoder_embedding=encoder_embedding, # B x T x C encoder_states=encoder_states, # List[T x B x C] ) def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out.encoder_out is not None: encoder_out = encoder_out._replace( encoder_out=encoder_out.encoder_out.index_select(1, new_order) ) if encoder_out.encoder_padding_mask is not None: encoder_out = encoder_out._replace( encoder_padding_mask=encoder_out.encoder_padding_mask.index_select(0, new_order) ) if encoder_out.encoder_embedding is not None: encoder_out = encoder_out._replace( encoder_embedding=encoder_out.encoder_embedding.index_select(0, new_order) ) if encoder_out.encoder_states is not None: for idx, state in enumerate(encoder_out.encoder_states): encoder_out.encoder_states[idx] = state.index_select(1, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: print('deleting {0}'.format(weights_key)) del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.decoder_layerdrop = args.decoder_layerdrop self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim self.padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim) self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, self.padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.cross_self_attention = getattr(args, 'cross_self_attention', False) self.layer_wise_attention = getattr(args, 'layer_wise_attention', False) self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None if getattr(args, 'layernorm_embedding', False): self.layernorm_embedding = LayerNorm(embed_dim) else: self.layernorm_embedding = None def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, features_only=False, **extra_args ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, **extra_args ) if not features_only: x = self.output_layer(x) return x, extra def extract_features( self, prev_output_tokens, encoder_out=None, incremental_state=None, full_context_alignment=False, alignment_layer=None, alignment_heads=None, **unused, ): """ Similar to *forward* but only return features. Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). 
Args: full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). alignment_layer (int, optional): return mean alignment over heads at this layer (default: last layer). alignment_heads (int, optional): only average alignment over this many heads (default: all heads). Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ if alignment_layer is None: alignment_layer = len(self.layers) - 1 # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions if self.layernorm_embedding: x = self.layernorm_embedding(x) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) self_attn_padding_mask = None if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) # decoder layers attn = None inner_states = [x] for idx, layer in enumerate(self.layers): encoder_state = None if encoder_out is not None: if self.layer_wise_attention: encoder_state = encoder_out.encoder_states[idx] else: encoder_state = encoder_out.encoder_out if incremental_state is None and not full_context_alignment: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not self.training or (dropout_probability > self.decoder_layerdrop): x, layer_attn = layer( x, encoder_state, encoder_out.encoder_padding_mask if encoder_out is not None else None, incremental_state, self_attn_mask=self_attn_mask, self_attn_padding_mask=self_attn_padding_mask, need_attn=(idx == alignment_layer), need_head_weights=(idx == alignment_layer), ) inner_states.append(x) if layer_attn is not None and idx == alignment_layer: attn = layer_attn.float() if attn is not None: if alignment_heads is not None: attn = attn[:alignment_heads] # average probabilities over heads attn = attn.mean(dim=0) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device or self._future_mask.size(0) < dim ): self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) return self._future_mask[:dim, :dim] def 
upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) return m @register_model_architecture('transformer', 'transformer') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.no_cross_attention = getattr(args, 'no_cross_attention', False) args.cross_self_attention = getattr(args, 'cross_self_attention', False) args.layer_wise_attention = getattr(args, 'layer_wise_attention', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) args.no_scale_embedding = getattr(args, 'no_scale_embedding', False) args.layernorm_embedding = getattr(args, 'layernorm_embedding', False) @register_model_architecture('transformer', 'transformer_iwslt_de_en') def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture(args) @register_model_architecture('transformer', 'transformer_wmt_en_de') def transformer_wmt_en_de(args): base_architecture(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big') def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big') def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer', 'transformer_wmt_en_de_big') def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t') def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = 
getattr(args, 'activation_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer_align', 'transformer_align') def transformer_align(args): args.alignment_heads = getattr(args, 'alignment_heads', 1) args.alignment_layer = getattr(args, 'alignment_layer', 4) args.full_context_alignment = getattr(args, 'full_context_alignment', False) base_architecture(args) @register_model_architecture('transformer_align', 'transformer_wmt_en_de_big_align') def transformer_wmt_en_de_big_align(args): args.alignment_heads = getattr(args, 'alignment_heads', 1) args.alignment_layer = getattr(args, 'alignment_layer', 4) transformer_wmt_en_de_big(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/transformer.py
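Both the encoder forward pass and the decoder's extract_features in the transformer.py file above gate every layer with the same LayerDrop coin flip (Fan et al., 2019): draw u ~ U(0, 1) and skip the layer when training and u <= layerdrop. Below is a minimal, self-contained sketch of that pattern; TinyLayerDropStack and its linear layers are hypothetical stand-ins, not the fairseq modules above.

import random

import torch
import torch.nn as nn


class TinyLayerDropStack(nn.Module):
    """Hypothetical stand-in: a stack of linear layers gated by LayerDrop."""

    def __init__(self, num_layers=6, dim=8, layerdrop=0.2):
        super().__init__()
        self.layers = nn.ModuleList([nn.Linear(dim, dim) for _ in range(num_layers)])
        self.layerdrop = layerdrop

    def forward(self, x):
        for layer in self.layers:
            # same gate as TransformerEncoder.forward / TransformerDecoder.extract_features:
            # at eval time every layer runs; at train time a layer is skipped with
            # probability `layerdrop`
            if not self.training or random.uniform(0, 1) > self.layerdrop:
                x = layer(x)
        return x


stack = TinyLayerDropStack()
_ = stack(torch.randn(4, 8))   # training mode: on average layerdrop * num_layers layers skipped
stack.eval()
_ = stack(torch.randn(4, 8))   # eval mode: all layers run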
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import options, utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( Embedding, TransformerDecoder, ) from fairseq.modules import ( AdaptiveInput, CharacterTokenEmbedder, ) DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model('transformer_lm') class TransformerLanguageModel(FairseqLanguageModel): @classmethod def hub_models(cls): def moses_fastbpe(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'fastbpe', } return { 'transformer_lm.gbw.adaptive_huge': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2', 'transformer_lm.wiki103.adaptive': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.tar.bz2', 'transformer_lm.wmt19.en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2'), 'transformer_lm.wmt19.de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2'), 'transformer_lm.wmt19.ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2'), } def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-output-dim', type=int, metavar='N', help='decoder output dimension') parser.add_argument('--decoder-input-dim', type=int, metavar='N', help='decoder input dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--no-decoder-final-norm', action='store_true', help='don\'t add an extra layernorm after the last decoder block') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion') parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N', help='adaptive input factor') parser.add_argument('--no-token-positional-embeddings', action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--character-embeddings', action='store_true', help='if set, uses character embedding convolutions to produce token embeddings') parser.add_argument('--character-filters', type=str, metavar='LIST', default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]', help='size of character embeddings') parser.add_argument('--character-embedding-dim', default=4, type=int, metavar='N', help='size of character embeddings') parser.add_argument('--char-embedder-highway-layers', default=2, type=int, metavar='N', help='number of highway layers for character token embeddder') parser.add_argument('--adaptive-input', action='store_true', help='if set, uses adaptive input') parser.add_argument('--adaptive-input-factor', type=float, metavar='N', help='adaptive input factor') parser.add_argument('--adaptive-input-cutoff', metavar='EXPR', help='comma separated list of adaptive input cutoff points.') parser.add_argument('--tie-adaptive-weights', action='store_true', help='if set, ties the weights of adaptive softmax and adaptive input') parser.add_argument('--tie-adaptive-proj', action='store_true', help='if set, ties the projection weights of adaptive softmax and adaptive input') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for decoder') parser.add_argument('--decoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list') parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding') parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_lm_architecture(args) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, 'max_target_positions', None) is None: args.max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS) if args.character_embeddings: embed_tokens = CharacterTokenEmbedder( task.source_dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers, ) elif args.adaptive_input: embed_tokens = AdaptiveInput( len(task.source_dictionary), task.source_dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, options.eval_str_list(args.adaptive_input_cutoff, type=int), ) else: embed_tokens = Embedding(len(task.source_dictionary), args.decoder_input_dim, task.source_dictionary.pad()) if args.tie_adaptive_weights: assert args.adaptive_input assert args.adaptive_input_factor == 
args.adaptive_softmax_factor assert args.adaptive_softmax_cutoff == args.adaptive_input_cutoff, '{} != {}'.format( args.adaptive_softmax_cutoff, args.adaptive_input_cutoff) assert args.decoder_input_dim == args.decoder_output_dim decoder = TransformerDecoder( args, task.target_dictionary, embed_tokens, no_encoder_attn=True, ) return TransformerLanguageModel(decoder) @register_model_architecture('transformer_lm', 'transformer_lm') def base_lm_architecture(args): # backward compatibility for older model checkpoints if hasattr(args, 'no_tie_adaptive_proj'): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if hasattr(args, 'decoder_final_norm'): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.0) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 2048) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.adaptive_softmax_factor = getattr(args, 'adaptive_softmax_factor', 4) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.add_bos_token = getattr(args, 'add_bos_token', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.character_embeddings = getattr(args, 'character_embeddings', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.adaptive_input_factor = getattr(args, 'adaptive_input_factor', 4) args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', None) args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', False) args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', False) args.no_scale_embedding = getattr(args, 'no_scale_embedding', False) args.layernorm_embedding = getattr(args, 'layernorm_embedding', False) @register_model_architecture('transformer_lm', 'transformer_lm_big') def transformer_lm_big(args): args.decoder_layers = getattr(args, 'decoder_layers', 12) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) base_lm_architecture(args) @register_model_architecture('transformer_lm', 'transformer_lm_wiki103') @register_model_architecture('transformer_lm', 'transformer_lm_baevski_wiki103') def transformer_lm_baevski_wiki103(args): args.decoder_layers = getattr(args, 'decoder_layers', 16) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.dropout 
= getattr(args, 'dropout', 0.3) args.adaptive_input = getattr(args, 'adaptive_input', True) args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', True) args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', '20000,60000') args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '20000,60000') args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0.2) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.1) args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', True) args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', True) transformer_lm_big(args) @register_model_architecture('transformer_lm', 'transformer_lm_gbw') @register_model_architecture('transformer_lm', 'transformer_lm_baevski_gbw') def transformer_lm_baevski_gbw(args): args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', True) transformer_lm_big(args) @register_model_architecture('transformer_lm', 'transformer_lm_gpt') def transformer_lm_gpt(args): args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072) args.decoder_layers = getattr(args, 'decoder_layers', 12) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 12) args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_fn = getattr(args, 'activation_fn', 'gelu') base_lm_architecture(args) @register_model_architecture('transformer_lm', 'transformer_lm_gpt2_small') def transformer_lm_gpt2_small(args): args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_layers = getattr(args, 'decoder_layers', 24) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_fn = getattr(args, 'activation_fn', 'gelu') base_lm_architecture(args) @register_model_architecture('transformer_lm', 'transformer_lm_gpt2_medium') def transformer_lm_gpt2_medium(args): args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1280) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 5120) args.decoder_layers = getattr(args, 'decoder_layers', 36) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 20) args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_fn = getattr(args, 'activation_fn', 'gelu') base_lm_architecture(args) @register_model_architecture('transformer_lm', 'transformer_lm_gpt2_big') def transformer_lm_gpt2_big(args): args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1600) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 6400) args.decoder_layers = getattr(args, 'decoder_layers', 48) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 25) args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_fn = getattr(args, 'activation_fn', 'gelu') base_lm_architecture(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/transformer_lm.py
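The registered architecture functions in the language-model file above (base_lm_architecture, transformer_lm_gpt, and so on) all rely on the same getattr-defaulting cascade: a named architecture fills in only the fields the user left unset, then delegates to its parent so every remaining field gets a default. A small sketch of that pattern, using hypothetical base_arch/gpt_like_arch helpers and argparse.Namespace in place of the parsed fairseq args:

from argparse import Namespace


def base_arch(args):
    # a default applies only when the attribute is still missing
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
    args.dropout = getattr(args, 'dropout', 0.1)


def gpt_like_arch(args):
    # override a subset of fields, then fall back to the base defaults
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
    base_arch(args)


args = Namespace(dropout=0.3)                 # pretend the user set --dropout 0.3
gpt_like_arch(args)
print(args.decoder_embed_dim, args.dropout)   # 768 0.3 -- defaults never clobber user values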
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, BeamableMM, GradMultiply, LearnedPositionalEmbedding, LinearizedConvolution, ) @register_model('fconv') class FConvModel(FairseqEncoderDecoderModel): """ A fully convolutional model, i.e. a convolutional encoder and a convolutional decoder, as described in `"Convolutional Sequence to Sequence Learning" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_. Args: encoder (FConvEncoder): the encoder decoder (FConvDecoder): the decoder The Convolutional model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.fconv_parser :prog: """ @classmethod def hub_models(cls): def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } return { 'conv.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2'), 'conv.wmt14.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2'), 'conv.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2'), } def __init__(self, encoder, decoder): super().__init__(encoder, decoder) self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') parser.add_argument('--share-input-output-embed', action='store_true', help='share input and output embeddings (requires' ' --decoder-out-embed-dim and --decoder-embed-dim' ' to be equal)') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) encoder_embed_dict = None if args.encoder_embed_path: encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path) utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary) decoder_embed_dict = None if args.decoder_embed_path: decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path) utils.print_embed_overlap(decoder_embed_dict, 
task.target_dictionary) encoder = FConvEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, embed_dict=encoder_embed_dict, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, ) decoder = FConvDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, embed_dict=decoder_embed_dict, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, share_embed=args.share_input_output_embed, ) return FConvModel(encoder, decoder) class FConvEncoder(FairseqEncoder): """ Convolutional encoder consisting of `len(convolutions)` layers. Args: dictionary (~fairseq.data.Dictionary): encoding dictionary embed_dim (int, optional): embedding dimension embed_dict (str, optional): filename from which to load pre-trained embeddings max_positions (int, optional): maximum supported input sequence length convolutions (list, optional): the convolutional layer structure. Each list item `i` corresponds to convolutional layer `i`. Layers are given as ``(out_channels, kernel_width, [residual])``. Residual connections are added between layers when ``residual=1`` (which is the default behavior). dropout (float, optional): dropout to be applied before each conv layer """ def __init__( self, dictionary, embed_dim=512, embed_dict=None, max_positions=1024, convolutions=((512, 3),) * 20, dropout=0.1, ): super().__init__(dictionary) self.dropout = dropout self.num_attention_layers = None num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) if embed_dict: self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, self.padding_idx, ) convolutions = extend_conv_spec(convolutions) in_channels = convolutions[0][0] self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.residuals = [] layer_in_channels = [in_channels] for _, (out_channels, kernel_size, residual) in enumerate(convolutions): if residual == 0: residual_dim = out_channels else: residual_dim = layer_in_channels[-residual] self.projections.append(Linear(residual_dim, out_channels) if residual_dim != out_channels else None) if kernel_size % 2 == 1: padding = kernel_size // 2 else: padding = 0 self.convolutions.append( ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout, padding=padding) ) self.residuals.append(residual) in_channels = out_channels layer_in_channels.append(out_channels) self.fc2 = Linear(in_channels, embed_dim) def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (tuple): a tuple with two elements, where the first element is the last encoder layer's output and the second element is the same quantity summed with the input embedding (used for attention). The shape of both tensors is `(batch, src_len, embed_dim)`. 
- **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) input_embedding = x # project to size of convolution x = self.fc1(x) # used to mask padding in input encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B if not encoder_padding_mask.any(): encoder_padding_mask = None # B x T x C -> T x B x C x = x.transpose(0, 1) residuals = [x] # temporal convolutions for proj, conv, res_layer in zip(self.projections, self.convolutions, self.residuals): if res_layer > 0: residual = residuals[-res_layer] residual = residual if proj is None else proj(residual) else: residual = None if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) x = F.dropout(x, p=self.dropout, training=self.training) if conv.kernel_size[0] % 2 == 1: # padding is implicit in the conv x = conv(x) else: padding_l = (conv.kernel_size[0] - 1) // 2 padding_r = conv.kernel_size[0] // 2 x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) x = conv(x) x = F.glu(x, dim=2) if residual is not None: x = (x + residual) * math.sqrt(0.5) residuals.append(x) # T x B x C -> B x T x C x = x.transpose(1, 0) # project back to size of embedding x = self.fc2(x) if encoder_padding_mask is not None: encoder_padding_mask = encoder_padding_mask.t() # -> B x T x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) # scale gradients (this only affects backward, not forward) x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) # add output to input embedding for attention y = (x + input_embedding) * math.sqrt(0.5) return { 'encoder_out': (x, y), 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = ( encoder_out['encoder_out'][0].index_select(0, new_order), encoder_out['encoder_out'][1].index_select(0, new_order), ) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return self.embed_positions.max_positions() class AttentionLayer(nn.Module): def __init__(self, conv_channels, embed_dim, bmm=None): super().__init__() # projects from output of convolution to embedding dimension self.in_projection = Linear(conv_channels, embed_dim) # projects from embedding dimension to convolution size self.out_projection = Linear(embed_dim, conv_channels) self.bmm = bmm if bmm is not None else torch.bmm def forward(self, x, target_embedding, encoder_out, encoder_padding_mask): residual = x # attention x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5) x = self.bmm(x, encoder_out[0]) # don't attend over padding if encoder_padding_mask is not None: x = x.float().masked_fill( encoder_padding_mask.unsqueeze(1), float('-inf') ).type_as(x) # FP16 support: cast to float and back # softmax over last dim sz = x.size() x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1) x = x.view(sz) attn_scores = x x = self.bmm(x, encoder_out[1]) # scale attention output (respecting potentially different lengths) s = encoder_out[1].size(1) if encoder_padding_mask is None: x = x * (s * math.sqrt(1.0 / s)) else: s = s - encoder_padding_mask.type_as(x).sum(dim=1, keepdim=True) # exclude padding s = 
s.unsqueeze(-1) x = x * (s * s.rsqrt()) # project back x = (self.out_projection(x) + residual) * math.sqrt(0.5) return x, attn_scores def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs): """Replace torch.bmm with BeamableMM.""" if beamable_mm_beam_size is not None: del self.bmm self.add_module('bmm', BeamableMM(beamable_mm_beam_size)) class FConvDecoder(FairseqIncrementalDecoder): """Convolutional decoder""" def __init__( self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 20, attention=True, dropout=0.1, share_embed=False, positional_embeddings=True, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, ): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([2])) self.dropout = dropout self.need_attn = True convolutions = extend_conv_spec(convolutions) in_channels = convolutions[0][0] if isinstance(attention, bool): # expand True into [True, True, ...] and do the same with False attention = [attention] * len(convolutions) if not isinstance(attention, list) or len(attention) != len(convolutions): raise ValueError('Attention is expected to be a list of booleans of ' 'length equal to the number of layers.') num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) if embed_dict: self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, padding_idx, ) if positional_embeddings else None self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.residuals = [] layer_in_channels = [in_channels] for i, (out_channels, kernel_size, residual) in enumerate(convolutions): if residual == 0: residual_dim = out_channels else: residual_dim = layer_in_channels[-residual] self.projections.append(Linear(residual_dim, out_channels) if residual_dim != out_channels else None) self.convolutions.append( LinearizedConv1d(in_channels, out_channels * 2, kernel_size, padding=(kernel_size - 1), dropout=dropout) ) self.attention.append(AttentionLayer(out_channels, embed_dim) if attention[i] else None) self.residuals.append(residual) in_channels = out_channels layer_in_channels.append(out_channels) self.adaptive_softmax = None self.fc2 = self.fc3 = None if adaptive_softmax_cutoff is not None: assert not share_embed self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, in_channels, adaptive_softmax_cutoff, dropout=adaptive_softmax_dropout) else: self.fc2 = Linear(in_channels, out_embed_dim) if share_embed: assert out_embed_dim == embed_dim, \ "Shared embed weights implies same dimensions " \ " out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim) self.fc3 = nn.Linear(out_embed_dim, num_embeddings) self.fc3.weight = self.embed_tokens.weight else: self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout) def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused): if encoder_out is not None: encoder_padding_mask = encoder_out['encoder_padding_mask'] encoder_out = encoder_out['encoder_out'] # split and transpose encoder outputs encoder_a, encoder_b = self._split_encoder_out(encoder_out, incremental_state) if self.embed_positions is not None: pos_embed = self.embed_positions(prev_output_tokens, incremental_state) else: pos_embed = 0 if incremental_state is not None: prev_output_tokens = 
prev_output_tokens[:, -1:] x = self._embed_tokens(prev_output_tokens, incremental_state) # embed tokens and combine with positional embeddings x += pos_embed x = F.dropout(x, p=self.dropout, training=self.training) target_embedding = x # project to size of convolution x = self.fc1(x) # B x T x C -> T x B x C x = self._transpose_if_training(x, incremental_state) # temporal convolutions avg_attn_scores = None num_attn_layers = len(self.attention) residuals = [x] for proj, conv, attention, res_layer in zip(self.projections, self.convolutions, self.attention, self.residuals): if res_layer > 0: residual = residuals[-res_layer] residual = residual if proj is None else proj(residual) else: residual = None x = F.dropout(x, p=self.dropout, training=self.training) x = conv(x, incremental_state) x = F.glu(x, dim=2) # attention if attention is not None: x = self._transpose_if_training(x, incremental_state) x, attn_scores = attention(x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask) if not self.training and self.need_attn: attn_scores = attn_scores / num_attn_layers if avg_attn_scores is None: avg_attn_scores = attn_scores else: avg_attn_scores.add_(attn_scores) x = self._transpose_if_training(x, incremental_state) # residual if residual is not None: x = (x + residual) * math.sqrt(0.5) residuals.append(x) # T x B x C -> B x T x C x = self._transpose_if_training(x, incremental_state) # project back to size of vocabulary if not using adaptive softmax if self.fc2 is not None and self.fc3 is not None: x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = self.fc3(x) return x, avg_attn_scores def reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) encoder_out = utils.get_incremental_state(self, incremental_state, 'encoder_out') if encoder_out is not None: encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out) utils.set_incremental_state(self, incremental_state, 'encoder_out', encoder_out) def max_positions(self): """Maximum output length supported by the decoder.""" return self.embed_positions.max_positions() if self.embed_positions is not None else float('inf') def upgrade_state_dict(self, state_dict): if utils.item(state_dict.get('decoder.version', torch.Tensor([1]))[0]) < 2: # old models use incorrect weight norm dimension for i, conv in enumerate(self.convolutions): # reconfigure weight norm nn.utils.remove_weight_norm(conv) self.convolutions[i] = nn.utils.weight_norm(conv, dim=0) state_dict['decoder.version'] = torch.Tensor([1]) return state_dict def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def _embed_tokens(self, tokens, incremental_state): if incremental_state is not None: # keep only the last token for incremental forward pass tokens = tokens[:, -1:] return self.embed_tokens(tokens) def _split_encoder_out(self, encoder_out, incremental_state): """Split and transpose encoder outputs. This is cached when doing incremental inference. 
""" cached_result = utils.get_incremental_state(self, incremental_state, 'encoder_out') if cached_result is not None: return cached_result # transpose only once to speed up attention layers encoder_a, encoder_b = encoder_out encoder_a = encoder_a.transpose(1, 2).contiguous() result = (encoder_a, encoder_b) if incremental_state is not None: utils.set_incremental_state(self, incremental_state, 'encoder_out', result) return result def _transpose_if_training(self, x, incremental_state): if incremental_state is None: x = x.transpose(0, 1) return x def extend_conv_spec(convolutions): """ Extends convolutional spec that is a list of tuples of 2 or 3 parameters (kernel size, dim size and optionally how many layers behind to look for residual) to default the residual propagation param if it is not specified """ extended = [] for spec in convolutions: if len(spec) == 3: extended.append(spec) elif len(spec) == 2: extended.append(spec + (1,)) else: raise Exception('invalid number of parameters in convolution spec ' + str(spec) + '. expected 2 or 3') return tuple(extended) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, 0, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) nn.init.normal_(m.weight, 0, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, dropout=0): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features) nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features)) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m) def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs): """Weight-normalized Conv1d layer""" from fairseq.modules import ConvTBC m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) @register_model_architecture('fconv', 'fconv') def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 20') args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 20') args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) args.decoder_attention = getattr(args, 'decoder_attention', 'True') args.share_input_output_embed = getattr(args, 'share_input_output_embed', False) @register_model_architecture('fconv', 'fconv_iwslt_de_en') def fconv_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256) args.encoder_layers = getattr(args, 
'encoder_layers', '[(256, 3)] * 4') args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256) args.decoder_layers = getattr(args, 'decoder_layers', '[(256, 3)] * 3') args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) base_architecture(args) @register_model_architecture('fconv', 'fconv_wmt_en_ro') def fconv_wmt_en_ro(args): args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) base_architecture(args) @register_model_architecture('fconv', 'fconv_wmt_en_de') def fconv_wmt_en_de(args): convs = '[(512, 3)] * 9' # first 9 layers have 512 units convs += ' + [(1024, 3)] * 4' # next 4 layers have 1024 units convs += ' + [(2048, 1)] * 2' # final 2 layers use 1x1 convolutions args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.encoder_layers = getattr(args, 'encoder_layers', convs) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768) args.decoder_layers = getattr(args, 'decoder_layers', convs) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) base_architecture(args) @register_model_architecture('fconv', 'fconv_wmt_en_fr') def fconv_wmt_en_fr(args): convs = '[(512, 3)] * 6' # first 6 layers have 512 units convs += ' + [(768, 3)] * 4' # next 4 layers have 768 units convs += ' + [(1024, 3)] * 3' # next 3 layers have 1024 units convs += ' + [(2048, 1)] * 1' # next 1 layer uses 1x1 convolutions convs += ' + [(4096, 1)] * 1' # final 1 layer uses 1x1 convolutions args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.encoder_layers = getattr(args, 'encoder_layers', convs) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768) args.decoder_layers = getattr(args, 'decoder_layers', convs) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) base_architecture(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/fconv.py
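The convolutional blocks in FConvEncoder and FConvDecoder above share one recipe: each convolution emits twice the target channel count, F.glu gates it back down to half, and the residual sum is scaled by sqrt(0.5) to keep variances roughly constant. A minimal sketch with a plain nn.Conv1d standing in for ConvTBC/LinearizedConv1d and hypothetical shapes:

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

B, C, T = 2, 16, 7                                      # hypothetical batch, channels, time steps
conv = nn.Conv1d(C, 2 * C, kernel_size=3, padding=1)    # stand-in for ConvTBC / LinearizedConv1d

x = torch.randn(B, C, T)
residual = x
y = conv(x)                              # B x 2C x T
y = F.glu(y, dim=1)                      # gated linear unit halves the channels: B x C x T
y = (y + residual) * math.sqrt(0.5)      # scaled residual, as in the encoder/decoder above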
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys import math import torch import torch.nn as nn import torch.nn.functional as F from . import ( BaseFairseqModel, register_model, register_model_architecture ) @register_model('wav2vec') class Wav2VecModel(BaseFairseqModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--prediction-steps', type=int, metavar='N', help='number of steps ahead to predict') parser.add_argument('--sample-distance', type=int, metavar='N', help='sample distance from target. does not work properly with cross-sampling') parser.add_argument('--cross-sample-negatives', action='store_true', help='whether to sample negatives across examples in the same batch') parser.add_argument('--num-negatives', type=int, metavar='N', help='number of negative examples') parser.add_argument('--conv-feature-layers', type=str, metavar='EXPR', help='convolutional feature extraction layers [(dim, kernel_size, stride), ...]') parser.add_argument('--conv-aggregator-layers', type=str, metavar='EXPR', help='convolutional feature extraction layers [(dim, kernel_size, stride), ...]') parser.add_argument('--dropout', type=float, metavar='D', help='dropout to apply within the model') parser.add_argument('--dropout-features', type=float, metavar='D', help='dropout to apply to the features') parser.add_argument('--dropout-agg', type=float, metavar='D', help='dropout to apply after aggregation step') parser.add_argument('--encoder', type=str, choices=['cnn'], help='type of encoder to use') parser.add_argument('--aggregator', type=str, choices=['cnn', 'gru'], help='type of aggregator to use') parser.add_argument('--gru-dim', type=int, metavar='N', help='GRU dimensionality') parser.add_argument('--no-conv-bias', action='store_true', help='if set, does not learn bias for conv layers') parser.add_argument('--agg-zero-pad', action='store_true', help='if set, zero pads in aggregator instead of repl pad') parser.add_argument('--skip-connections-feat', action='store_true', help='if set, adds skip connections to the feature extractor') parser.add_argument('--skip-connections-agg', action='store_true', help='if set, adds skip connections to the aggregator') parser.add_argument('--residual-scale', type=float, metavar='D', help='scales residual by sqrt(value)') parser.add_argument('--log-compression', action='store_true', help='if set, adds a log compression to feature extractor') parser.add_argument('--balanced-classes', action='store_true', help='if set, loss is scaled to balance for number of negatives') parser.add_argument('--project-features', choices=['none', 'same', 'new'], help='if not none, features are projected using the (same or new) aggregator') parser.add_argument('--non-affine-group-norm', action='store_true', help='if set, group norm is not affine') parser.add_argument('--offset', help='if set, introduces an offset from target to predictions. 
' 'if set to "auto", it is computed automatically from the receptive field') @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_wav2vec_architecture(args) model = Wav2VecModel(args) print(model) return model def __init__(self, args): super().__init__() self.prediction_steps = args.prediction_steps offset = args.offset if args.encoder == 'cnn': feature_enc_layers = eval(args.conv_feature_layers) self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0., log_compression=args.log_compression, skip_connections=args.skip_connections_feat, residual_scale=args.residual_scale, non_affine_group_norm=args.non_affine_group_norm, ) embed = feature_enc_layers[-1][0] else: raise Exception('unknown encoder type ' + args.encoder) if args.offset == 'auto': assert args.encoder == 'cnn' jin = 0 rin = 0 for _, k, stride in feature_enc_layers: if rin == 0: rin = k rin = rin + (k - 1) * jin if jin == 0: jin = stride else: jin *= stride offset = math.ceil(rin / jin) offset = int(offset) def make_aggregator(): if args.aggregator == 'cnn': agg_layers = eval(args.conv_aggregator_layers) agg_dim = agg_layers[-1][0] feature_aggregator = ConvAggegator( conv_layers=agg_layers, embed=embed, dropout=args.dropout, skip_connections=args.skip_connections_agg, residual_scale=args.residual_scale, non_affine_group_norm=args.non_affine_group_norm, conv_bias=not args.no_conv_bias, zero_pad=args.agg_zero_pad, ) elif args.aggregator == 'gru': agg_dim = args.gru_dim feature_aggregator = nn.Sequential( TransposeLast(), nn.GRU( input_size=embed, hidden_size=agg_dim, num_layers=1, dropout=args.dropout, ), TransposeLast(deconstruct_idx=0), ) else: raise Exception('unknown aggregator type ' + args.aggregator) return feature_aggregator, agg_dim self.feature_aggregator, agg_dim = make_aggregator() self.wav2vec_predictions = Wav2VecPredictionsModel( in_dim=agg_dim, out_dim=embed, prediction_steps=args.prediction_steps, n_negatives=args.num_negatives, cross_sample_negatives=args.cross_sample_negatives, sample_distance=args.sample_distance, dropout=args.dropout, offset=offset, balanced_classes=args.balanced_classes, ) self.dropout_feats = nn.Dropout(p=args.dropout_features) self.dropout_agg = nn.Dropout(p=args.dropout_agg) if args.project_features == 'none': self.project_features = None elif args.project_features == 'same': self.project_features = self.feature_aggregator elif args.project_features == 'new': self.project_features, _ = make_aggregator() def forward(self, source): result = {} features = self.feature_extractor(source) x = self.dropout_feats(features) x = self.feature_aggregator(x) x = self.dropout_agg(x) if self.project_features is not None: features = self.project_features(features) x, targets = self.wav2vec_predictions(x, features) result['cpc_logits'] = x result['cpc_targets'] = targets return result def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) def max_positions(self): """Maximum length supported by the model.""" return sys.maxsize def get_logits(self, net_output): logits = net_output['cpc_logits'] return logits def get_targets(self, sample, net_output, expand_steps=True): t = net_output['cpc_targets'] return t.contiguous() def get_target_weights(self, targets, net_output): targets = net_output['cpc_targets'] if isinstance(targets, tuple) and targets[-1] is not None: return targets[-1] return 1. 
class TransposeLast(nn.Module): def __init__(self, deconstruct_idx=None): super().__init__() self.deconstruct_idx = deconstruct_idx def forward(self, x): if self.deconstruct_idx is not None: x = x[self.deconstruct_idx] return x.transpose(-2, -1) class Fp32GroupNorm(nn.GroupNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, input): output = F.group_norm( input.float(), self.num_groups, self.weight.float() if self.weight is not None else None, self.bias.float() if self.bias is not None else None, self.eps) return output.type_as(input) class Fp32LayerNorm(nn.LayerNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, input): output = F.layer_norm( input.float(), self.normalized_shape, self.weight.float() if self.weight is not None else None, self.bias.float() if self.bias is not None else None, self.eps) return output.type_as(input) def norm_block(is_layer_norm, dim, affine=True): if is_layer_norm: mod = nn.Sequential( TransposeLast(), Fp32LayerNorm(dim, elementwise_affine=affine), TransposeLast(), ) else: mod = Fp32GroupNorm(1, dim, affine=affine) return mod class ConvFeatureExtractionModel(nn.Module): def __init__(self, conv_layers, dropout, log_compression, skip_connections, residual_scale, non_affine_group_norm): super().__init__() def block(n_in, n_out, k, stride): return nn.Sequential( nn.Conv1d(n_in, n_out, k, stride=stride, bias=False), nn.Dropout(p=dropout), norm_block(is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm), nn.ReLU(), ) in_d = 1 self.conv_layers = nn.ModuleList() for i, (dim, k, stride) in enumerate(conv_layers): self.conv_layers.append( block(in_d, dim, k, stride)) in_d = dim self.log_compression = log_compression self.skip_connections = skip_connections self.residual_scale = math.sqrt(residual_scale) def forward(self, x): # BxT -> BxCxT x = x.unsqueeze(1) for conv in self.conv_layers: residual = x x = conv(x) if self.skip_connections and x.size(1) == residual.size(1): tsz = x.size(2) r_tsz = residual.size(2) residual = residual[..., ::r_tsz // tsz][..., :tsz] x = (x + residual) * self.residual_scale if self.log_compression: x = x.abs() x = x + 1 x = x.log() return x class ZeroPad1d(nn.Module): def __init__(self, pad_left, pad_right): super().__init__() self.pad_left = pad_left self.pad_right = pad_right def forward(self, x): return F.pad(x, (self.pad_left, self.pad_right)) class ConvAggegator(nn.Module): def __init__(self, conv_layers, embed, dropout, skip_connections, residual_scale, non_affine_group_norm, conv_bias, zero_pad): super().__init__() def block(n_in, n_out, k, stride): # padding dims only really make sense for stride = 1 ka = k // 2 kb = ka - 1 if k % 2 == 0 else ka pad = ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0)) return nn.Sequential( pad, nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias), nn.Dropout(p=dropout), norm_block(False, n_out, affine=not non_affine_group_norm), nn.ReLU(), ) in_d = embed self.conv_layers = nn.ModuleList() self.residual_proj = nn.ModuleList() for i, (dim, k, stride) in enumerate(conv_layers): if in_d != dim and skip_connections: self.residual_proj.append( nn.Conv1d(in_d, dim, 1, bias=False), ) else: self.residual_proj.append(None) self.conv_layers.append( block(in_d, dim, k, stride)) in_d = dim self.conv_layers = nn.Sequential(*self.conv_layers) self.skip_connections = skip_connections self.residual_scale = math.sqrt(residual_scale) def forward(self, x): for rproj, conv in 
zip(self.residual_proj, self.conv_layers): residual = x x = conv(x) if self.skip_connections: if rproj is not None: residual = rproj(residual) x = (x + residual) * self.residual_scale return x class Wav2VecPredictionsModel(nn.Module): def __init__(self, in_dim, out_dim, prediction_steps, n_negatives, cross_sample_negatives, sample_distance, dropout, offset, balanced_classes): super().__init__() self.n_negatives = n_negatives self.cross_sample_negatives = cross_sample_negatives self.sample_distance = sample_distance self.project_to_steps = nn.ConvTranspose2d(in_dim, out_dim, (1, prediction_steps)) self.dropout = nn.Dropout(p=dropout) self.offset = offset self.balanced_classes = balanced_classes def sample_negatives(self, y): bsz, fsz, tsz = y.shape y = y.transpose(0, 1) # BCT -> CBT y = y.contiguous().view(fsz, -1) # CBT => C(BxT) if self.cross_sample_negatives: high = tsz * bsz assert self.sample_distance is None, 'sample distance is not supported with cross sampling' else: high = tsz if self.sample_distance is None else min(tsz, self.sample_distance) neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz)) if self.sample_distance is not None and self.sample_distance < tsz: neg_idxs += torch.cat( [torch.arange(start=1, end=tsz - self.sample_distance, device=neg_idxs.device, dtype=neg_idxs.dtype), torch.arange(start=tsz - self.sample_distance, end=tsz - self.sample_distance * 2 - 1, step=-1, device=neg_idxs.device, dtype=neg_idxs.dtype)]) if not self.cross_sample_negatives: for i in range(1, bsz): neg_idxs[i] += i * high negs = y[..., neg_idxs.view(-1)] negs = negs.view(fsz, bsz, self.n_negatives, tsz).permute(2, 1, 0, 3) # to NxBxCxT return negs def forward(self, x, y): negatives = self.sample_negatives(y) y = y.unsqueeze(0) targets = torch.cat([y, negatives], dim=0) x = x.unsqueeze(-1) x = self.project_to_steps(x) # BxCxTxS x = self.dropout(x) x = x.unsqueeze(0).expand(targets.size(0), -1, -1, -1, -1) copies, bsz, dim, tsz, steps = x.shape steps = min(steps, tsz - self.offset) predictions = x.new(bsz * copies * (tsz - self.offset + 1) * steps - ((steps + 1) * steps // 2) * copies * bsz) labels = torch.zeros_like(predictions) weights = torch.full_like(labels, 1 / self.n_negatives) if self.balanced_classes else None start = end = 0 for i in range(steps): offset = i + self.offset end = start + (tsz - offset) * bsz * copies pos_num = (end - start) // copies predictions[start:end] = (x[..., :-offset, i] * targets[..., offset:]).sum(dim=2).flatten() labels[start:start + pos_num] = 1. if weights is not None: weights[start:start + pos_num] = 1. start = end assert end == predictions.numel(), '{} != {}'.format(end, predictions.numel()) if weights is not None: labels = (labels, weights) return predictions, labels @register_model_architecture('wav2vec', 'wav2vec') def base_wav2vec_architecture(args): conv_feature_layers = '[(512, 10, 5)]' conv_feature_layers += ' + [(512, 8, 4)]' conv_feature_layers += ' + [(512, 4, 2)] * 3' args.conv_feature_layers = getattr(args, 'conv_feature_layers', conv_feature_layers) args.conv_aggregator_layers = getattr(args, 'conv_aggregator_layers', '[(512, 3, 1)] * 9') args.prediction_steps = getattr(args, 'prediction_steps', 12) args.num_negatives = getattr(args, 'num_negatives', 1) args.sample_distance = getattr(args, 'sample_distance', None) args.cross_sample_negatives = getattr(args, 'cross_sample_negatives', False) args.dropout = getattr(args, 'dropout', 0.) args.dropout_features = getattr(args, 'dropout_features', 0.) 
    args.dropout_agg = getattr(args, 'dropout_agg', 0.)
    args.encoder = getattr(args, 'encoder', 'cnn')
    args.aggregator = getattr(args, 'aggregator', 'cnn')
    args.skip_connections_feat = getattr(args, 'skip_connections_feat', False)
    args.skip_connections_agg = getattr(args, 'skip_connections_agg', False)
    args.residual_scale = getattr(args, 'residual_scale', 0.5)
    args.gru_dim = getattr(args, 'gru_dim', 512)
    args.no_conv_bias = getattr(args, 'no_conv_bias', False)
    args.agg_zero_pad = getattr(args, 'agg_zero_pad', False)
    args.log_compression = getattr(args, 'log_compression', False)
    args.balanced_classes = getattr(args, 'balanced_classes', False)
    args.project_features = getattr(args, 'project_features', 'none')
    args.non_affine_group_norm = getattr(args, 'non_affine_group_norm', False)
    args.offset = getattr(args, 'offset', 'auto')
data2vec_vision-main
infoxlm/fairseq/fairseq/models/wav2vec.py
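The 'auto' offset in Wav2VecModel is derived from the receptive field and cumulative stride of the convolutional feature extractor. The standalone sketch below is not part of wav2vec.py and the function name auto_offset is made up here; it reproduces that arithmetic so it can be checked in isolation.

import math

def auto_offset(conv_feature_layers):
    """conv_feature_layers: list of (dim, kernel_size, stride) tuples.

    jin tracks the cumulative stride (jump) of the stack and rin its
    receptive field, exactly as in Wav2VecModel.__init__ when offset == 'auto'.
    """
    jin, rin = 0, 0
    for _, k, stride in conv_feature_layers:
        if rin == 0:
            rin = k
        rin = rin + (k - 1) * jin
        if jin == 0:
            jin = stride
        else:
            jin *= stride
    return int(math.ceil(rin / jin))


if __name__ == '__main__':
    # default feature extractor from base_wav2vec_architecture
    layers = [(512, 10, 5)] + [(512, 8, 4)] + [(512, 4, 2)] * 3
    print(auto_offset(layers))  # 3 for this default conv stack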
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax @register_model('lstm') class LSTMModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-freeze-embed', action='store_true', help='freeze encoder embeddings') parser.add_argument('--encoder-hidden-size', type=int, metavar='N', help='encoder hidden size') parser.add_argument('--encoder-layers', type=int, metavar='N', help='number of encoder layers') parser.add_argument('--encoder-bidirectional', action='store_true', help='make all layers of encoder bidirectional') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-freeze-embed', action='store_true', help='freeze decoder embeddings') parser.add_argument('--decoder-hidden-size', type=int, metavar='N', help='decoder hidden size') parser.add_argument('--decoder-layers', type=int, metavar='N', help='number of decoder layers') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='BOOL', help='decoder attention') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion') parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', default=False, action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') # Granular dropout settings (if not specified these default to --dropout) parser.add_argument('--encoder-dropout-in', type=float, metavar='D', help='dropout probability for encoder input embedding') parser.add_argument('--encoder-dropout-out', type=float, metavar='D', help='dropout probability for encoder output') parser.add_argument('--decoder-dropout-in', type=float, metavar='D', help='dropout probability for decoder input embedding') parser.add_argument('--decoder-dropout-out', type=float, metavar='D', help='dropout probability for decoder output') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) if args.encoder_layers != args.decoder_layers: raise ValueError('--encoder-layers must match --decoder-layers') def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(embed_path) utils.print_embed_overlap(embed_dict, dictionary) return utils.load_embedding(embed_dict, dictionary, embed_tokens) if args.encoder_embed_path: pretrained_encoder_embed = load_pretrained_embedding_from_file( args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim) else: num_embeddings = len(task.source_dictionary) pretrained_encoder_embed = Embedding( num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad() ) if args.share_all_embeddings: # double check all parameters combinations are valid if task.source_dictionary != task.target_dictionary: raise ValueError('--share-all-embeddings requires a joint dictionary') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError( '--share-all-embed not compatible with --decoder-embed-path' ) if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to ' 'match --decoder-embed-dim' ) pretrained_decoder_embed = pretrained_encoder_embed args.share_decoder_input_output_embed = True else: # separate decoder input embeddings pretrained_decoder_embed = None if args.decoder_embed_path: pretrained_decoder_embed = load_pretrained_embedding_from_file( args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim ) # one last double check of parameter combinations if args.share_decoder_input_output_embed and ( args.decoder_embed_dim != args.decoder_out_embed_dim): raise ValueError( '--share-decoder-input-output-embeddings requires ' '--decoder-embed-dim to match --decoder-out-embed-dim' ) if args.encoder_freeze_embed: pretrained_encoder_embed.weight.requires_grad = False if args.decoder_freeze_embed: pretrained_decoder_embed.weight.requires_grad = False encoder = LSTMEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, hidden_size=args.encoder_hidden_size, num_layers=args.encoder_layers, dropout_in=args.encoder_dropout_in, dropout_out=args.encoder_dropout_out, bidirectional=args.encoder_bidirectional, 
pretrained_embed=pretrained_encoder_embed, ) decoder = LSTMDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, attention=options.eval_bool(args.decoder_attention), encoder_output_units=encoder.output_units, pretrained_embed=pretrained_decoder_embed, share_input_output_embed=args.share_decoder_input_output_embed, adaptive_softmax_cutoff=( options.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == 'adaptive_loss' else None ), ) return cls(encoder, decoder) class LSTMEncoder(FairseqEncoder): """LSTM encoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, bidirectional=False, left_pad=True, pretrained_embed=None, padding_value=0., ): super().__init__(dictionary) self.num_layers = num_layers self.dropout_in = dropout_in self.dropout_out = dropout_out self.bidirectional = bidirectional self.hidden_size = hidden_size num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) else: self.embed_tokens = pretrained_embed self.lstm = LSTM( input_size=embed_dim, hidden_size=hidden_size, num_layers=num_layers, dropout=self.dropout_out if num_layers > 1 else 0., bidirectional=bidirectional, ) self.left_pad = left_pad self.padding_value = padding_value self.output_units = hidden_size if bidirectional: self.output_units *= 2 def forward(self, src_tokens, src_lengths): if self.left_pad: # nn.utils.rnn.pack_padded_sequence requires right-padding; # convert left-padding to right-padding src_tokens = utils.convert_padding_direction( src_tokens, self.padding_idx, left_to_right=True, ) bsz, seqlen = src_tokens.size() # embed tokens x = self.embed_tokens(src_tokens) x = F.dropout(x, p=self.dropout_in, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # pack embedded source tokens into a PackedSequence packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist()) # apply LSTM if self.bidirectional: state_size = 2 * self.num_layers, bsz, self.hidden_size else: state_size = self.num_layers, bsz, self.hidden_size h0 = x.new_zeros(*state_size) c0 = x.new_zeros(*state_size) packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) # unpack outputs and apply dropout x, _ = nn.utils.rnn.pad_packed_sequence(packed_outs, padding_value=self.padding_value) x = F.dropout(x, p=self.dropout_out, training=self.training) assert list(x.size()) == [seqlen, bsz, self.output_units] if self.bidirectional: def combine_bidir(outs): out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous() return out.view(self.num_layers, bsz, -1) final_hiddens = combine_bidir(final_hiddens) final_cells = combine_bidir(final_cells) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() return { 'encoder_out': (x, final_hiddens, final_cells), 'encoder_padding_mask': encoder_padding_mask if encoder_padding_mask.any() else None } def reorder_encoder_out(self, encoder_out, new_order): encoder_out['encoder_out'] = tuple( eo.index_select(1, new_order) for eo in encoder_out['encoder_out'] ) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(1, new_order) return encoder_out def 
max_positions(self): """Maximum input length supported by the encoder.""" return int(1e5) # an arbitrary large number class AttentionLayer(nn.Module): def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False): super().__init__() self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias) self.output_proj = Linear(input_embed_dim + source_embed_dim, output_embed_dim, bias=bias) def forward(self, input, source_hids, encoder_padding_mask): # input: bsz x input_embed_dim # source_hids: srclen x bsz x source_embed_dim # x: bsz x source_embed_dim x = self.input_proj(input) # compute attention attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2) # don't attend over padding if encoder_padding_mask is not None: attn_scores = attn_scores.float().masked_fill_( encoder_padding_mask, float('-inf') ).type_as(attn_scores) # FP16 support: cast to float and back attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz # sum weighted sources x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0) x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1))) return x, attn_scores class LSTMDecoder(FairseqIncrementalDecoder): """LSTM decoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, attention=True, encoder_output_units=512, pretrained_embed=None, share_input_output_embed=False, adaptive_softmax_cutoff=None, ): super().__init__(dictionary) self.dropout_in = dropout_in self.dropout_out = dropout_out self.hidden_size = hidden_size self.share_input_output_embed = share_input_output_embed self.need_attn = True self.adaptive_softmax = None num_embeddings = len(dictionary) padding_idx = dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) else: self.embed_tokens = pretrained_embed self.encoder_output_units = encoder_output_units if encoder_output_units != hidden_size: self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size) self.encoder_cell_proj = Linear(encoder_output_units, hidden_size) else: self.encoder_hidden_proj = self.encoder_cell_proj = None self.layers = nn.ModuleList([ LSTMCell( input_size=hidden_size + embed_dim if layer == 0 else hidden_size, hidden_size=hidden_size, ) for layer in range(num_layers) ]) if attention: # TODO make bias configurable self.attention = AttentionLayer(hidden_size, encoder_output_units, hidden_size, bias=False) else: self.attention = None if hidden_size != out_embed_dim: self.additional_fc = Linear(hidden_size, out_embed_dim) if adaptive_softmax_cutoff is not None: # setting adaptive_softmax dropout to dropout_out for now but can be redefined self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, hidden_size, adaptive_softmax_cutoff, dropout=dropout_out) elif not self.share_input_output_embed: self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out) def forward(self, prev_output_tokens, encoder_out, incremental_state=None): x, attn_scores = self.extract_features( prev_output_tokens, encoder_out, incremental_state ) return self.output_layer(x), attn_scores def extract_features( self, prev_output_tokens, encoder_out, incremental_state=None ): """ Similar to *forward* but only return features. 
""" encoder_padding_mask = encoder_out['encoder_padding_mask'] encoder_out = encoder_out['encoder_out'] if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bsz, seqlen = prev_output_tokens.size() # get outputs from encoder encoder_outs, encoder_hiddens, encoder_cells = encoder_out[:3] srclen = encoder_outs.size(0) # embed tokens x = self.embed_tokens(prev_output_tokens) x = F.dropout(x, p=self.dropout_in, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # initialize previous states (or get from cache during incremental generation) cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state') if cached_state is not None: prev_hiddens, prev_cells, input_feed = cached_state else: num_layers = len(self.layers) prev_hiddens = [encoder_hiddens[i] for i in range(num_layers)] prev_cells = [encoder_cells[i] for i in range(num_layers)] if self.encoder_hidden_proj is not None: prev_hiddens = [self.encoder_hidden_proj(x) for x in prev_hiddens] prev_cells = [self.encoder_cell_proj(x) for x in prev_cells] input_feed = x.new_zeros(bsz, self.hidden_size) attn_scores = x.new_zeros(srclen, seqlen, bsz) outs = [] for j in range(seqlen): # input feeding: concatenate context vector from previous time step input = torch.cat((x[j, :, :], input_feed), dim=1) for i, rnn in enumerate(self.layers): # recurrent cell hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) # hidden state becomes the input to the next layer input = F.dropout(hidden, p=self.dropout_out, training=self.training) # save state for next time step prev_hiddens[i] = hidden prev_cells[i] = cell # apply attention using the last layer's hidden state if self.attention is not None: out, attn_scores[:, j, :] = self.attention(hidden, encoder_outs, encoder_padding_mask) else: out = hidden out = F.dropout(out, p=self.dropout_out, training=self.training) # input feeding input_feed = out # save final output outs.append(out) # cache previous states (no-op except during incremental generation) utils.set_incremental_state( self, incremental_state, 'cached_state', (prev_hiddens, prev_cells, input_feed), ) # collect outputs across time steps x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) # T x B x C -> B x T x C x = x.transpose(1, 0) if hasattr(self, 'additional_fc') and self.adaptive_softmax is None: x = self.additional_fc(x) x = F.dropout(x, p=self.dropout_out, training=self.training) # srclen x tgtlen x bsz -> bsz x tgtlen x srclen if not self.training and self.need_attn: attn_scores = attn_scores.transpose(0, 2) else: attn_scores = None return x, attn_scores def output_layer(self, x): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: if self.share_input_output_embed: x = F.linear(x, self.embed_tokens.weight) else: x = self.fc_out(x) return x def reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state') if cached_state is None: return def reorder_state(state): if isinstance(state, list): return [reorder_state(state_i) for state_i in state] return state.index_select(0, new_order) new_state = tuple(map(reorder_state, cached_state)) utils.set_incremental_state(self, incremental_state, 'cached_state', new_state) def max_positions(self): """Maximum output length supported by the decoder.""" return int(1e5) # an arbitrary large number def make_generation_fast_(self, need_attn=False, 
**kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.uniform_(m.weight, -0.1, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def LSTM(input_size, hidden_size, **kwargs): m = nn.LSTM(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if 'weight' in name or 'bias' in name: param.data.uniform_(-0.1, 0.1) return m def LSTMCell(input_size, hidden_size, **kwargs): m = nn.LSTMCell(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if 'weight' in name or 'bias' in name: param.data.uniform_(-0.1, 0.1) return m def Linear(in_features, out_features, bias=True, dropout=0): """Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.uniform_(-0.1, 0.1) if bias: m.bias.data.uniform_(-0.1, 0.1) return m @register_model_architecture('lstm', 'lstm') def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_freeze_embed = getattr(args, 'encoder_freeze_embed', False) args.encoder_hidden_size = getattr(args, 'encoder_hidden_size', args.encoder_embed_dim) args.encoder_layers = getattr(args, 'encoder_layers', 1) args.encoder_bidirectional = getattr(args, 'encoder_bidirectional', False) args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', args.dropout) args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', args.dropout) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_freeze_embed = getattr(args, 'decoder_freeze_embed', False) args.decoder_hidden_size = getattr(args, 'decoder_hidden_size', args.decoder_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 1) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) args.decoder_attention = getattr(args, 'decoder_attention', '1') args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', args.dropout) args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000') @register_model_architecture('lstm', 'lstm_wiseman_iwslt_de_en') def lstm_wiseman_iwslt_de_en(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256) args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', 0) args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', 0) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', 0) args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout) base_architecture(args) @register_model_architecture('lstm', 'lstm_luong_wmt_en_de') def lstm_luong_wmt_en_de(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1000) args.encoder_layers = getattr(args, 'encoder_layers', 4) args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', 0) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1000) args.decoder_layers = 
getattr(args, 'decoder_layers', 4)
    args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 1000)
    args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', 0)
    base_architecture(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/lstm.py
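The AttentionLayer above computes plain dot-product attention between the current decoder state and every encoder state. Below is a toy check of that computation with random tensors; the sizes and the standalone nn.Linear are assumptions for illustration, not taken from lstm.py.

import torch
import torch.nn as nn
import torch.nn.functional as F

srclen, bsz, src_dim, tgt_dim = 7, 2, 16, 16
source_hids = torch.randn(srclen, bsz, src_dim)  # encoder outputs, srclen x bsz x C
decoder_state = torch.randn(bsz, tgt_dim)        # current decoder hidden state

input_proj = nn.Linear(tgt_dim, src_dim, bias=False)
x = input_proj(decoder_state)                                  # bsz x src_dim
attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)        # srclen x bsz
attn_scores = F.softmax(attn_scores, dim=0)                    # normalize over source positions
context = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)  # bsz x src_dim weighted sum
print(context.shape, attn_scores.sum(dim=0))                   # weights sum to 1 per sentence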
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.models import FairseqEncoder class CompositeEncoder(FairseqEncoder): """ A wrapper around a dictionary of :class:`FairseqEncoder` objects. We run forward on each encoder and return a dictionary of outputs. The first encoder's dictionary is used for initialization. Args: encoders (dict): a dictionary of :class:`FairseqEncoder` objects. """ def __init__(self, encoders): super().__init__(next(iter(encoders.values())).dictionary) self.encoders = encoders for key in self.encoders: self.add_module(key, self.encoders[key]) def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: the outputs from each Encoder """ encoder_out = {} for key in self.encoders: encoder_out[key] = self.encoders[key](src_tokens, src_lengths) return encoder_out def reorder_encoder_out(self, encoder_out, new_order): """Reorder encoder output according to new_order.""" for key in self.encoders: encoder_out[key] = self.encoders[key].reorder_encoder_out(encoder_out[key], new_order) return encoder_out def max_positions(self): return min([self.encoders[key].max_positions() for key in self.encoders]) def upgrade_state_dict(self, state_dict): for key in self.encoders: self.encoders[key].upgrade_state_dict(state_dict) return state_dict
data2vec_vision-main
infoxlm/fairseq/fairseq/models/composite_encoder.py
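A hedged usage sketch for CompositeEncoder: two toy encoders (defined here for illustration, not part of the file) are wrapped in a dictionary, and the composite forwards the same source batch to both and returns their outputs keyed by name. This assumes fairseq is importable with the module layout shown in the file paths above.

import torch
from fairseq.data import Dictionary
from fairseq.models import FairseqEncoder
from fairseq.models.composite_encoder import CompositeEncoder

class ToyEncoder(FairseqEncoder):
    """Illustrative encoder that just scales the token ids."""
    def __init__(self, dictionary, scale):
        super().__init__(dictionary)
        self.scale = scale

    def forward(self, src_tokens, src_lengths):
        return {'encoder_out': src_tokens.float() * self.scale}

    def max_positions(self):
        return 1024

d = Dictionary()
tokens = torch.tensor([[4, 5, 6]])
lengths = torch.tensor([3])
composite = CompositeEncoder({'a': ToyEncoder(d, 1.0), 'b': ToyEncoder(d, 2.0)})
out = composite(tokens, lengths)
print(sorted(out.keys()))         # ['a', 'b'] -- one output dict per wrapped encoder
print(composite.max_positions())  # min over the wrapped encoders -> 1024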
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn.functional as F from fairseq.models import register_model, register_model_architecture from fairseq.models.levenshtein_transformer import ( LevenshteinTransformerDecoder, LevenshteinTransformerModel, ) from fairseq.models.transformer import Linear, TransformerModel from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange class NegativeDistanceScore(object): def __init__(self): # pre-compute some values self.scores = {} self.scores[0.5] = self.compute_score_full(50, 0.5) self.scores[1.0] = self.compute_score_full(50, 1.0) self.scores[2.0] = self.compute_score_full(50, 2.0) def __call__(self, i, L, tau): if (tau is None) or (tau > 1000): return 1 / L if tau in self.scores: if L < self.scores[tau].shape[0]: return self.scores[tau][L - 1, i] return self.compute_score(L, tau)[i] def compute_score(self, L, tau): s = np.array([-abs(L / 2 - i) / tau for i in range(L)]) s = np.exp(s - s.max()) return s / s.sum() def compute_score_full(self, L, tau): s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau s = np.tril(s, 0) + np.triu(s - float("inf"), 1) s = np.exp(s - s.max(1, keepdims=True)) return s / s.sum(1, keepdims=True) neg_scorer = NegativeDistanceScore() def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n') raise e B = in_tokens.size(0) T = in_tokens.size(1) V = vocab_size with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) insert_labels = [a[:-1] for a in full_labels] # numericalize1 insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float() insert_index, insert_labels = zip( *[ (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau)) for i, labels in enumerate(insert_labels) for j, label in enumerate(labels[1:-1]) for k, w in enumerate(label) ] ) # HACK 1:-1 insert_index, insert_labels = [ torch.tensor(list(a), device=in_tokens.device) for a in [insert_index, insert_labels] ] insert_label_tensors.scatter_(0, insert_index.long(), insert_labels) insert_label_tensors = insert_label_tensors.view(B, T - 1, V) return insert_label_tensors def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx): padding_masks = in_tokens[:, 1:].eq(padding_idx) word_ins_scores.masked_fill_(padding_masks, 0.0) word_ins_pred.masked_fill_(padding_masks, padding_idx) in_coords = new_arange(in_tokens).type_as(in_scores) # shift all padding predictions to infinite out_coords = (in_coords[:, 1:] - 0.5).masked_fill( word_ins_pred.eq(padding_idx), float("inf") ) out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1] out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords) out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords) return out_tokens, out_scores @register_model("insertion_transformer") class InsertionTransformerModel(LevenshteinTransformerModel): def __init__(self, args, encoder, 
decoder): super().__init__(args, encoder, decoder) @staticmethod def add_args(parser): TransformerModel.add_args(parser) parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) parser.add_argument("--label-tau", default=None, type=float) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion word_ins_out = self.decoder.forward_word_ins( prev_output_tokens, encoder_out=encoder_out ) word_ins_tgt = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk, len(self.tgt_dict), tau=self.decoder.label_tau, ).type_as(word_ins_out) word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_masks, "ls": self.args.label_smoothing, "nll_loss": True } } def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # TODO: decoding for InsertionTransformer word_ins_out = self.decoder.forward_word_ins( output_tokens, encoder_out=encoder_out ) word_ins_score = F.log_softmax(word_ins_out, 2) if eos_penalty > 0.0: word_ins_score[:, :, self.pad] -= eos_penalty word_ins_score, word_ins_pred = word_ins_score.max(-1) output_tokens, output_scores = _apply_ins_words( output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history ) class InsertionTransformerDecoder(LevenshteinTransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): # use the TransformerDecoder's __init__ super(LevenshteinTransformerDecoder, self).__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim) self.label_tau = getattr(args, "label_tau", None) def forward_word_ins(self, prev_output_tokens, encoder_out=None): features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0] features = self.pool_out( torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) ) return self.output_layer(features) def forward_mask_ins(self, *args, **kwargs): raise NotImplementedError def forward_word_del(self, *args, **kwargs): raise NotImplementedError def forward_word_del_mask_ins(self, *args, **kwargs): raise NotImplementedError @register_model_architecture("insertion_transformer", "insertion_transformer") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, 
"encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # special for insertion transformer args.label_tau = getattr(args, "label_tau", None)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/insertion_transformer.py
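NegativeDistanceScore weights the insertion targets within a slot by their distance from the slot centre, softened by a temperature tau. Below is a standalone NumPy sketch of that weighting; the function name is made up here, and the fallback to a uniform 1/L matches the tau is None / tau > 1000 branch above.

import numpy as np

def negative_distance_score(L, tau):
    """Weight the L positions of a slot; small tau concentrates mass at the centre."""
    if tau is None or tau > 1000:
        return np.full(L, 1.0 / L)      # uniform fallback
    s = np.array([-abs(L / 2 - i) / tau for i in range(L)])
    s = np.exp(s - s.max())             # stable softmax over negative distances
    return s / s.sum()

print(np.round(negative_distance_score(5, 1.0), 3))  # peaked around the centre position
print(negative_distance_score(5, None))              # uniform 0.2 everywhere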
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( BaseFairseqModel, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.modules import ( LayerNorm, SinusoidalPositionalEmbedding, TransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params @register_model('masked_lm') class MaskedLMModel(BaseFairseqModel): """ Class for training a Masked Language Model. It also supports an additional sentence level prediction if the sent-loss argument is set. """ def __init__(self, args, encoder): super().__init__() self.args = args self.encoder = encoder # if specified then apply bert initialization on the model. We need # to explictly call this to make sure that the output embeddings # and projection layers are also correctly initialized if getattr(args, 'apply_bert_init', False): self.apply(init_bert_params) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # Arguments related to dropout parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for' ' attention weights') parser.add_argument('--act-dropout', type=float, metavar='D', help='dropout probability after' ' activation in FFN') # Arguments related to hidden states and self-attention parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--bias-kv', action='store_true', help='if set, adding a learnable bias kv') parser.add_argument('--zero-attn', action='store_true', help='if set, pads attn with zero') # Arguments related to input and output embeddings parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--share-encoder-input-output-embed', action='store_true', help='share encoder input' ' and output embeddings') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--no-token-positional-embeddings', action='store_true', help='if set, disables positional embeddings' ' (outside self attention)') parser.add_argument('--num-segment', type=int, metavar='N', help='num segment in the input') # Arguments related to sentence level prediction parser.add_argument('--sentence-class-num', type=int, metavar='N', help='number of classes for sentence task') parser.add_argument('--sent-loss', action='store_true', help='if set,' ' calculate sentence level predictions') # Arguments related to parameter initialization parser.add_argument('--apply-bert-init', action='store_true', help='use custom param initialization for BERT') # misc params parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='Which activation function to use for pooler layer.') parser.add_argument('--encoder-normalize-before', 
action='store_true', help='apply layernorm before each encoder block') def forward(self, src_tokens, segment_labels=None, **kwargs): return self.encoder(src_tokens, segment_labels=segment_labels, **kwargs) def max_positions(self): return self.encoder.max_positions @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_positions'): args.max_positions = args.tokens_per_sample print("Model args: ", args) encoder = MaskedLMEncoder(args, task.dictionary) return cls(args, encoder) class MaskedLMEncoder(FairseqEncoder): """ Encoder for Masked Language Modelling. """ def __init__(self, args, dictionary): super().__init__(dictionary) self.padding_idx = dictionary.pad() self.vocab_size = dictionary.__len__() self.max_positions = args.max_positions self.sentence_encoder = TransformerSentenceEncoder( padding_idx=self.padding_idx, vocab_size=self.vocab_size, num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.act_dropout, max_seq_len=self.max_positions, num_segments=args.num_segment, use_position_embeddings=not args.no_token_positional_embeddings, encoder_normalize_before=args.encoder_normalize_before, apply_bert_init=args.apply_bert_init, activation_fn=args.activation_fn, learned_pos_embedding=args.encoder_learned_pos, add_bias_kv=args.bias_kv, add_zero_attn=args.zero_attn, ) self.share_input_output_embed = args.share_encoder_input_output_embed self.embed_out = None self.sentence_projection_layer = None self.sentence_out_dim = args.sentence_class_num self.lm_output_learned_bias = None # Remove head is set to true during fine-tuning self.load_softmax = not getattr(args, 'remove_head', False) self.masked_lm_pooler = nn.Linear( args.encoder_embed_dim, args.encoder_embed_dim ) self.pooler_activation = utils.get_activation_fn(args.pooler_activation_fn) self.lm_head_transform_weight = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim) self.activation_fn = utils.get_activation_fn(args.activation_fn) self.layer_norm = LayerNorm(args.encoder_embed_dim) self.lm_output_learned_bias = None if self.load_softmax: self.lm_output_learned_bias = nn.Parameter(torch.zeros(self.vocab_size)) if not self.share_input_output_embed: self.embed_out = nn.Linear( args.encoder_embed_dim, self.vocab_size, bias=False ) if args.sent_loss: self.sentence_projection_layer = nn.Linear( args.encoder_embed_dim, self.sentence_out_dim, bias=False ) def forward(self, src_tokens, segment_labels=None, **unused): """ Forward pass for Masked LM encoder. This first computes the token embedding using the token embedding matrix, position embeddings (if specified) and segment embeddings (if specified). Here we assume that the sentence representation corresponds to the output of the classification_token (see bert_task or cross_lingual_lm task for more details). Args: - src_tokens: B x T matrix representing sentences - segment_labels: B x T matrix representing segment label for tokens Returns: - a tuple of the following: - logits for predictions in format B x T x C to be used in softmax afterwards - a dictionary of additional data, where 'pooled_output' contains the representation for classification_token and 'inner_states' is a list of internal model states used to compute the predictions (similar in ELMO). 
'sentence_logits' is the prediction logit for NSP task and is only computed if this is specified in the input arguments. """ inner_states, sentence_rep = self.sentence_encoder( src_tokens, segment_labels=segment_labels, ) x = inner_states[-1].transpose(0, 1) x = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(x))) pooled_output = self.pooler_activation(self.masked_lm_pooler(sentence_rep)) # project back to size of vocabulary if self.share_input_output_embed \ and hasattr(self.sentence_encoder.embed_tokens, 'weight'): x = F.linear(x, self.sentence_encoder.embed_tokens.weight) elif self.embed_out is not None: x = self.embed_out(x) if self.lm_output_learned_bias is not None: x = x + self.lm_output_learned_bias sentence_logits = None if self.sentence_projection_layer: sentence_logits = self.sentence_projection_layer(pooled_output) return x, { 'inner_states': inner_states, 'pooled_output': pooled_output, 'sentence_logits': sentence_logits } def max_positions(self): """Maximum output length supported by the encoder.""" return self.max_positions def upgrade_state_dict_named(self, state_dict, name): if isinstance( self.sentence_encoder.embed_positions, SinusoidalPositionalEmbedding ): state_dict[ name + '.sentence_encoder.embed_positions._float_tensor' ] = torch.FloatTensor(1) if not self.load_softmax: for k in list(state_dict.keys()): if ( "embed_out.weight" in k or "sentence_projection_layer.weight" in k or "lm_output_learned_bias" in k ): del state_dict[k] return state_dict @register_model_architecture('masked_lm', 'masked_lm') def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.act_dropout = getattr(args, 'act_dropout', 0.0) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.bias_kv = getattr(args, 'bias_kv', False) args.zero_attn = getattr(args, 'zero_attn', False) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.num_segment = getattr(args, 'num_segment', 2) args.sentence_class_num = getattr(args, 'sentence_class_num', 2) args.sent_loss = getattr(args, 'sent_loss', False) args.apply_bert_init = getattr(args, 'apply_bert_init', False) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) @register_model_architecture('masked_lm', 'bert_base') def bert_base_architecture(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.share_encoder_input_output_embed = getattr( args, 'share_encoder_input_output_embed', True) args.no_token_positional_embeddings = getattr( args, 'no_token_positional_embeddings', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True) args.num_segment = getattr(args, 'num_segment', 2) args.encoder_layers = getattr(args, 'encoder_layers', 12) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072) args.bias_kv = getattr(args, 'bias_kv', False) 
    args.zero_attn = getattr(args, 'zero_attn', False)
    args.sentence_class_num = getattr(args, 'sentence_class_num', 2)
    args.sent_loss = getattr(args, 'sent_loss', True)
    args.apply_bert_init = getattr(args, 'apply_bert_init', True)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu')
    args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
    base_architecture(args)


@register_model_architecture('masked_lm', 'bert_large')
def bert_large_architecture(args):
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
    args.encoder_layers = getattr(args, 'encoder_layers', 24)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
    bert_base_architecture(args)


@register_model_architecture('masked_lm', 'xlm_base')
def xlm_architecture(args):
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
    args.share_encoder_input_output_embed = getattr(
        args, 'share_encoder_input_output_embed', True)
    args.no_token_positional_embeddings = getattr(
        args, 'no_token_positional_embeddings', False)
    args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)
    args.num_segment = getattr(args, 'num_segment', 1)
    args.encoder_layers = getattr(args, 'encoder_layers', 6)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
    args.bias_kv = getattr(args, 'bias_kv', False)
    args.zero_attn = getattr(args, 'zero_attn', False)
    args.sent_loss = getattr(args, 'sent_loss', False)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu')
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
    args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
    args.apply_bert_init = getattr(args, 'apply_bert_init', True)
    base_architecture(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/masked_lm.py
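When --share-encoder-input-output-embed is set, MaskedLMEncoder projects hidden states back to the vocabulary with the token embedding matrix itself rather than a separate output matrix. A minimal sketch with toy sizes (the dimensions below are assumptions for illustration, not defaults from the file):

import torch
import torch.nn as nn
import torch.nn.functional as F

vocab_size, embed_dim = 100, 32
embed_tokens = nn.Embedding(vocab_size, embed_dim, padding_idx=1)
hidden = torch.randn(2, 8, embed_dim)           # B x T x C encoder output
logits = F.linear(hidden, embed_tokens.weight)  # B x T x V, reusing the embedding weights
print(logits.shape)                             # torch.Size([2, 8, 100])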
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.models import FairseqDecoder class FairseqIncrementalDecoder(FairseqDecoder): """Base class for incremental decoders. Incremental decoding is a special mode at inference time where the Model only receives a single timestep of input corresponding to the previous output token (for teacher forcing) and must produce the next output *incrementally*. Thus the model must cache any long-term state that is needed about the sequence, e.g., hidden states, convolutional states, etc. Compared to the standard :class:`FairseqDecoder` interface, the incremental decoder interface allows :func:`forward` functions to take an extra keyword argument (*incremental_state*) that can be used to cache state across time-steps. The :class:`FairseqIncrementalDecoder` interface also defines the :func:`reorder_incremental_state` method, which is used during beam search to select and reorder the incremental state based on the selection of beams. To learn more about how incremental decoding works, refer to `this blog <http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_. """ def __init__(self, dictionary): super().__init__(dictionary) def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs): """ Args: prev_output_tokens (LongTensor): shifted output tokens of shape `(batch, tgt_len)`, for teacher forcing encoder_out (dict, optional): output from the encoder, used for encoder-side attention incremental_state (dict, optional): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ raise NotImplementedError def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs): """ Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ raise NotImplementedError def reorder_incremental_state(self, incremental_state, new_order): """Reorder incremental state. This should be called when the order of the input has changed from the previous time step. A typical use case is beam search, where the input order changes between time steps based on the selection of beams. """ seen = set() def apply_reorder_incremental_state(module): if module != self and hasattr(module, 'reorder_incremental_state') \ and module not in seen: seen.add(module) module.reorder_incremental_state(incremental_state, new_order) self.apply(apply_reorder_incremental_state) def set_beam_size(self, beam_size): """Sets the beam size in the decoder and all children.""" if getattr(self, '_beam_size', -1) != beam_size: seen = set() def apply_set_beam_size(module): if module != self and hasattr(module, 'set_beam_size') \ and module not in seen: seen.add(module) module.set_beam_size(beam_size) self.apply(apply_set_beam_size) self._beam_size = beam_size
data2vec_vision-main
infoxlm/fairseq/fairseq/models/fairseq_incremental_decoder.py
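The docstring above describes the incremental-decoding contract: at inference the caller passes the same `incremental_state` dict at every step, so the decoder only has to process the newest token and can reuse whatever it cached earlier. A minimal toy sketch of that loop (not fairseq's actual API; the running-sum "state" merely stands in for cached hidden or convolutional states):

import torch

class ToyIncrementalDecoder(torch.nn.Module):
    def __init__(self, vocab_size=16, dim=8):
        super().__init__()
        self.embed = torch.nn.Embedding(vocab_size, dim)
        self.proj = torch.nn.Linear(dim, vocab_size)

    def forward(self, prev_output_tokens, incremental_state=None):
        if incremental_state is not None:
            # Only the newest timestep is processed; earlier work lives in the cache.
            prev_output_tokens = prev_output_tokens[:, -1:]
            state = incremental_state.get('sum', 0) + self.embed(prev_output_tokens)
            incremental_state['sum'] = state
            return self.proj(state)
        # Non-incremental path: process the whole prefix at once.
        return self.proj(self.embed(prev_output_tokens).cumsum(dim=1))

decoder = ToyIncrementalDecoder()
tokens = torch.tensor([[2]])            # start-of-sentence id (arbitrary)
state = {}
for _ in range(3):                      # greedy incremental decoding loop
    logits = decoder(tokens, incremental_state=state)
    next_tok = logits[:, -1].argmax(dim=-1, keepdim=True)
    tokens = torch.cat([tokens, next_tok], dim=1)
print(tokens)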
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, DynamicConv, LayerNorm, PositionalEmbedding, LightweightConv, MultiheadAttention, ) @register_model('lightconv') class LightConvModel(FairseqEncoderDecoderModel): """ LightConv and DynamicConv model from `"Pay Less Attention with Lightweight and Dynamic Convolutions" (Wu, et al, 2019) <https://openreview.net/pdf?id=SkVhlh09tX>`_. To use LightConv please set --encoder-conv-type lightweight --decoder-conv-type lightweight To use DynamicConv please set --encoder-conv-type dynamic --decoder-conv-type dynamic Args: encoder (LightConvEncoder): the encoder decoder (LightConvDecoder): the decoder The LightConv model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.lightconv_parser :prog: """ def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--relu-dropout', type=float, metavar='D', help='dropout probability after ReLU in FFN') parser.add_argument('--input-dropout', type=float, metavar='D', help='dropout probability of the inputs') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-conv-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads or LightConv/DynamicConv heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-conv-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads or LightConv/DynamicConv heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', 
action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') """LightConv and DynamicConv arguments""" parser.add_argument('--encoder-kernel-size-list', type=lambda x: options.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31,31]")') parser.add_argument('--decoder-kernel-size-list', type=lambda x: options.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31]")') parser.add_argument('--encoder-glu', type=options.eval_bool, help='glu after in proj') parser.add_argument('--decoder-glu', type=options.eval_bool, help='glu after in proj') parser.add_argument('--encoder-conv-type', default='dynamic', type=str, choices=['dynamic', 'lightweight'], help='type of convolution') parser.add_argument('--decoder-conv-type', default='dynamic', type=str, choices=['dynamic', 'lightweight'], help='type of convolution') parser.add_argument('--weight-softmax', default=True, type=options.eval_bool) parser.add_argument('--weight-dropout', type=float, metavar='D', help='dropout probability for conv weights') @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = 1024 if not hasattr(args, 'max_target_positions'): args.max_target_positions = 1024 src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise RuntimeError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise RuntimeError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise RuntimeError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) encoder = LightConvEncoder(args, src_dict, encoder_embed_tokens) decoder = LightConvDecoder(args, tgt_dict, decoder_embed_tokens) return LightConvModel(encoder, decoder) class LightConvEncoder(FairseqEncoder): """ LightConv encoder consisting of *args.encoder_layers* layers. 
Each layer is a :class:`LightConvEncoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ LightConvEncoderLayer(args, kernel_size=args.encoder_kernel_size_list[i]) for i in range(args.encoder_layers) ]) self.register_buffer('version', torch.Tensor([2])) self.normalize = args.encoder_normalize_before if self.normalize: self.layer_norm = LayerNorm(embed_dim) def forward(self, src_tokens, **unused): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.normalize: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) class LightConvDecoder(FairseqIncrementalDecoder): """ LightConv decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`LightConvDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs. 
Default: ``False`` """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False, final_norm=True): super().__init__(dictionary) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ LightConvDecoderLayer(args, no_encoder_attn, kernel_size=args.decoder_kernel_size_list[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, output_embed_dim, bias=False) \ if embed_dim != output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim ** -0.5) self.register_buffer('version', torch.Tensor([2])) self.normalize = args.decoder_normalize_before and final_norm if self.normalize: self.layer_norm = LayerNorm(embed_dim) def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the last decoder layer's output of shape `(batch, tgt_len, vocab)` - the last decoder layer's attention weights of shape `(batch, tgt_len, src_len)` """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, incremental_state, ) inner_states.append(x) if self.normalize: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) if 
self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: x = F.linear(x, self.embed_tokens.weight) else: x = F.linear(x, self.embed_out) return x, {'attn': attn, 'inner_states': inner_states} def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] class LightConvEncoderLayer(nn.Module): """Encoder layer block. Args: args (argparse.Namespace): parsed command-line arguments kernel_size: kernel size of the convolution """ def __init__(self, args, kernel_size=0): super().__init__() self.embed_dim = args.encoder_embed_dim self.conv_dim = args.encoder_conv_dim padding_l = kernel_size // 2 if kernel_size % 2 == 1 else ((kernel_size - 1) // 2, kernel_size // 2) if args.encoder_glu: self.linear1 = Linear(self.embed_dim, 2*self.conv_dim) self.act = nn.GLU() else: self.linear1 = Linear(self.embed_dim, self.conv_dim) self.act = None if args.encoder_conv_type == 'lightweight': self.conv = LightweightConv(self.conv_dim, kernel_size, padding_l=padding_l, weight_softmax=args.weight_softmax, num_heads=args.encoder_attention_heads, weight_dropout=args.weight_dropout) elif args.encoder_conv_type == 'dynamic': self.conv = DynamicConv(self.conv_dim, kernel_size, padding_l=padding_l, weight_softmax=args.weight_softmax, num_heads=args.encoder_attention_heads, weight_dropout=args.weight_dropout) else: raise NotImplementedError self.linear2 = Linear(self.conv_dim, self.embed_dim) self.dropout = args.dropout self.relu_dropout = args.relu_dropout self.input_dropout = args.input_dropout self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for _ in range(2)]) def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(0, x, before=True) x = F.dropout(x, p=self.input_dropout, training=self.training) x = self.linear1(x) if self.act is not None: x = self.act(x) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0, 1).unsqueeze(2), 0) x = self.conv(x) x = self.linear2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(0, x, after=True) residual = x x = self.maybe_layer_norm(1, x, before=True) x = F.relu(self.fc1(x)) x = F.dropout(x, p=self.relu_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(1, x, after=True) return x def maybe_layer_norm(self, i, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return self.layer_norms[i](x) else: return x def extra_repr(self): return 'dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}'.format( self.dropout, self.relu_dropout, self.input_dropout, self.normalize_before) class LightConvDecoderLayer(nn.Module): """Decoder layer block. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs. Default: ``False`` kernel_size: kernel size of the convolution """ def __init__(self, args, no_encoder_attn=False, kernel_size=0): super().__init__() self.embed_dim = args.decoder_embed_dim self.conv_dim = args.decoder_conv_dim if args.decoder_glu: self.linear1 = Linear(self.embed_dim, 2*self.conv_dim) self.act = nn.GLU() else: self.linear1 = Linear(self.embed_dim, self.conv_dim) self.act = None if args.decoder_conv_type == 'lightweight': self.conv = LightweightConv(self.conv_dim, kernel_size, padding_l=kernel_size-1, weight_softmax=args.weight_softmax, num_heads=args.decoder_attention_heads, weight_dropout=args.weight_dropout) elif args.decoder_conv_type == 'dynamic': self.conv = DynamicConv(self.conv_dim, kernel_size, padding_l=kernel_size-1, weight_softmax=args.weight_softmax, num_heads=args.decoder_attention_heads, weight_dropout=args.weight_dropout) else: raise NotImplementedError self.linear2 = Linear(self.conv_dim, self.embed_dim) self.dropout = args.dropout self.relu_dropout = args.relu_dropout self.input_dropout = args.input_dropout self.normalize_before = args.decoder_normalize_before self.conv_layer_norm = LayerNorm(self.embed_dim) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.need_attn = True def forward(self, x, encoder_out, encoder_padding_mask, incremental_state, prev_conv_state=None, prev_attn_state=None, conv_mask=None, conv_padding_mask=None): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.conv_layer_norm, x, before=True) if prev_conv_state is not None: if incremental_state is None: incremental_state = {} self.conv._set_input_buffer(incremental_state, prev_conv_state) x = F.dropout(x, p=self.input_dropout, training=self.training) x = self.linear1(x) if self.act is not None: x = self.act(x) x = self.conv(x, incremental_state=incremental_state) x = self.linear2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.conv_layer_norm, x, after=True) attn = None if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = F.relu(self.fc1(x)) x = F.dropout(x, p=self.relu_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def extra_repr(self): return 'dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}'.format( self.dropout, self.relu_dropout, self.input_dropout, self.normalize_before) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
return m @register_model_architecture('lightconv', 'lightconv') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 7) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.relu_dropout = getattr(args, 'relu_dropout', 0.) args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) args.encoder_conv_dim = getattr(args, 'encoder_conv_dim', args.encoder_embed_dim) args.decoder_conv_dim = getattr(args, 'decoder_conv_dim', args.decoder_embed_dim) args.encoder_kernel_size_list = getattr(args, 'encoder_kernel_size_list', [3, 7, 15, 31, 31, 31, 31]) args.decoder_kernel_size_list = getattr(args, 'decoder_kernel_size_list', [3, 7, 15, 31, 31, 31]) if len(args.encoder_kernel_size_list) == 1: args.encoder_kernel_size_list = args.encoder_kernel_size_list * args.encoder_layers if len(args.decoder_kernel_size_list) == 1: args.decoder_kernel_size_list = args.decoder_kernel_size_list * args.decoder_layers assert len(args.encoder_kernel_size_list) == args.encoder_layers, "encoder_kernel_size_list doesn't match encoder_layers" assert len(args.decoder_kernel_size_list) == args.decoder_layers, "decoder_kernel_size_list doesn't match decoder_layers" args.encoder_glu = getattr(args, 'encoder_glu', True) args.decoder_glu = getattr(args, 'decoder_glu', True) args.input_dropout = getattr(args, 'input_dropout', 0.1) args.weight_dropout = getattr(args, 'weight_dropout', args.attention_dropout) @register_model_architecture('lightconv', 'lightconv_iwslt_de_en') def lightconv_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 7) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 
'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.weight_dropout = getattr(args, 'weight_dropout', 0.1) args.encoder_glu = getattr(args, 'encoder_glu', False) args.decoder_glu = getattr(args, 'decoder_glu', False) args.input_dropout = getattr(args, 'input_dropout', 0.0) base_architecture(args) @register_model_architecture('lightconv', 'lightconv_wmt_en_de') def lightconv_wmt_en_de(args): base_architecture(args) @register_model_architecture('lightconv', 'lightconv_wmt_en_de_big') def lightconv_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('lightconv', 'lightconv_wmt_en_fr_big') def lightconv_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) lightconv_wmt_en_de_big(args) @register_model_architecture('lightconv', 'lightconv_wmt_zh_en_big') def lightconv_wmt_zh_en_big(args): args.dropout = getattr(args, 'dropout', 0.2) args.attention_dropout = getattr(args, 'attention_dropout', 0.2) args.weight_dropout = getattr(args, 'weight_dropout', 0.2) lightconv_wmt_en_de_big(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/lightconv.py
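Both LightConv layer classes above route their normalization through `maybe_layer_norm`, where the `after ^ self.normalize_before` test decides between pre-norm and post-norm placement from a single pair of call sites. A small standalone illustration of which call site actually applies the norm:

def norm_fires(normalize_before, before=False, after=False):
    # Mirrors the boolean logic of maybe_layer_norm: exactly one call site fires.
    assert before ^ after
    return after ^ normalize_before

print(norm_fires(False, after=True))    # True  -> post-norm: normalize after the residual add
print(norm_fires(False, before=True))   # False
print(norm_fires(True, before=True))    # True  -> pre-norm: normalize before the block
print(norm_fires(True, after=True))     # False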
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import options from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.lightconv import ( Embedding, LightConvDecoder, ) from fairseq.modules import ( AdaptiveInput, CharacterTokenEmbedder, ) @register_model('lightconv_lm') class LightConvLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--dropout', default=0.1, type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', default=0., type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--relu-dropout', default=0., type=float, metavar='D', help='dropout probability after ReLU in FFN') parser.add_argument('--input-dropout', type=float, metavar='D', help='dropout probability of the inputs') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-output-dim', type=int, metavar='N', help='decoder output dimension') parser.add_argument('--decoder-input-dim', type=int, metavar='N', help='decoder input dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads or LightConv/DynamicConv heads') parser.add_argument('--decoder-normalize-before', default=False, action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion') parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N', help='adaptive input factor') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings') parser.add_argument('--character-embeddings', default=False, action='store_true', help='if set, uses character embedding convolutions to produce token embeddings') parser.add_argument('--character-filters', type=str, metavar='LIST', default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]', help='size of character embeddings') parser.add_argument('--character-embedding-dim', type=int, metavar='N', default=4, help='size of character embeddings') parser.add_argument('--char-embedder-highway-layers', type=int, metavar='N', default=2, help='number of highway layers for character token embeddder') parser.add_argument('--adaptive-input', default=False, action='store_true', help='if set, uses adaptive input') parser.add_argument('--adaptive-input-factor', type=float, metavar='N', help='adaptive input factor') parser.add_argument('--adaptive-input-cutoff', metavar='EXPR', help='comma separated list of adaptive input cutoff points.') parser.add_argument('--tie-adaptive-weights', action='store_true', help='if set, ties the weights of adaptive softmax and adaptive input') parser.add_argument('--tie-adaptive-proj', action='store_true', help='if set, ties the projection weights of adaptive softmax and adaptive input') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') """LightConv and DynamicConv arguments""" parser.add_argument('--decoder-kernel-size-list', type=lambda x: options.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31]")') parser.add_argument('--decoder-glu', type=options.eval_bool, help='glu after in proj') parser.add_argument('--decoder-conv-type', default='dynamic', type=str, choices=['dynamic', 'lightweight'], help='type of convolution') parser.add_argument('--weight-softmax', default=True, type=options.eval_bool) parser.add_argument('--weight-dropout', type=float, metavar='D', help='dropout probability for conv weights') @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_lm_architecture(args) if getattr(args, 'max_source_positions', None) is None: args.max_source_positions = args.tokens_per_sample if getattr(args, 'max_target_positions', None) is None: args.max_target_positions = args.tokens_per_sample if args.character_embeddings: embed_tokens = CharacterTokenEmbedder(task.dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers, ) elif args.adaptive_input: embed_tokens = AdaptiveInput(len(task.dictionary), task.dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, options.eval_str_list(args.adaptive_input_cutoff, type=int)) else: embed_tokens = Embedding(len(task.dictionary), args.decoder_input_dim, task.dictionary.pad()) if args.tie_adaptive_weights: assert args.adaptive_input 
assert args.adaptive_input_factor == args.adaptive_softmax_factor assert args.adaptive_softmax_cutoff == args.adaptive_input_cutoff, '{} != {}'.format( args.adaptive_softmax_cutoff, args.adaptive_input_cutoff) assert args.decoder_input_dim == args.decoder_output_dim decoder = LightConvDecoder(args, task.output_dictionary, embed_tokens, no_encoder_attn=True, final_norm=False) return LightConvLanguageModel(decoder) @register_model_architecture('lightconv_lm', 'lightconv_lm') def base_lm_architecture(args): args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 2048) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.adaptive_softmax_factor = getattr(args, 'adaptive_softmax_factor', 4) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.character_embeddings = getattr(args, 'character_embeddings', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) args.decoder_conv_dim = getattr(args, 'decoder_conv_dim', args.decoder_embed_dim) # The model training is not stable without this args.decoder_normalize_before = True args.adaptive_input = getattr(args, 'adaptive_input', False) args.adaptive_input_factor = getattr(args, 'adaptive_input_factor', 4) args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', None) args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', False) args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', False) args.decoder_kernel_size_list = getattr(args, 'decoder_kernel_size_list', [3, 7, 15, 31, 31, 31]) if len(args.decoder_kernel_size_list) == 1: args.decoder_kernel_size_list = args.decoder_kernel_size_list * args.decoder_layers assert len(args.decoder_kernel_size_list) == args.decoder_layers, "decoder_kernel_size_list doesn't match decoder_layers" args.decoder_glu = getattr(args, 'decoder_glu', True) args.input_dropout = getattr(args, 'input_dropout', 0.1) args.weight_dropout = getattr(args, 'weight_dropout', args.attention_dropout) @register_model_architecture('lightconv_lm', 'lightconv_lm_gbw') def lightconv_lm_gbw(args): args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) base_lm_architecture(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/lightconv_lm.py
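The `--decoder-kernel-size-list` option above is parsed with `options.eval_str_list(x, int)`, which accepts either a Python-style list literal or a single bare value. A rough standalone approximation of that behaviour, for illustration only (this is a sketch, not fairseq's implementation):

import ast

def eval_int_list(x):
    # Accept "[3,7,15,31,31,31]", "(3, 7)", or a bare "15".
    value = ast.literal_eval(x) if isinstance(x, str) else x
    if not isinstance(value, (list, tuple)):
        value = [value]
    return [int(v) for v in value]

print(eval_int_list("[3,7,15,31,31,31]"))   # [3, 7, 15, 31, 31, 31]
print(eval_int_list("15"))                  # [15]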
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .hub_interface import * # noqa from .model import * # noqa
data2vec_vision-main
infoxlm/fairseq/fairseq/models/bart/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension """ import torch.nn as nn from fairseq import utils from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import TransformerModel from fairseq.modules.transformer_sentence_encoder import init_bert_params from .hub_interface import BARTHubInterface @register_model('bart') class BARTModel(TransformerModel): @classmethod def hub_models(cls): return { 'bart.large': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz', 'bart.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz', 'bart.large.cnn': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz', } def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): super(BARTModel, BARTModel).add_args(parser) parser.add_argument( '--pooler-dropout', type=float, metavar='D', help='dropout probability in the masked_lm pooler layers' ) parser.add_argument( '--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use for pooler layer' ) @property def supported_targets(self): return {'self'} def forward( self, src_tokens, src_lengths, prev_output_tokens, features_only=False, classification_head_name=None, **kwargs ): if classification_head_name is not None: features_only = True encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, **kwargs, ) x, extra = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, **kwargs, ) if classification_head_name is not None: sentence_representation = x[ src_tokens.eq(self.encoder.dictionary.eos()), : ].view(x.size(0), -1, x.size(-1))[:, -1, :] x = self.classification_heads[classification_head_name]( sentence_representation ) return x, extra @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='gpt2', **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return BARTHubInterface(x['args'], x['task'], x['models'][0]) def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs): """Register a classification head.""" print("Registering classification head: {0}".format(name)) if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: print( 'WARNING: re-registering head "{}" with num_classes {} (prev: {}) ' 'and inner_dim {} (prev: {})'.format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = BARTClassificationHead( self.args.encoder_embed_dim, inner_dim or self.args.encoder_embed_dim, num_classes, self.args.pooler_activation_fn, self.args.pooler_dropout, ) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + '.' 
if name != '' else '' current_head_names = [] if not hasattr(self, 'classification_heads') else \ self.classification_heads.keys() # Handle new classification heads present in the state dict. keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + 'classification_heads.'): continue head_name = k[len(prefix + 'classification_heads.'):].split('.')[0] num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0) inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0) if getattr(self.args, 'load_checkpoint_heads', False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: print( 'WARNING: deleting classification head ({}) from checkpoint ' 'not present in current model: {}'.format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): print( 'WARNING: deleting classification head ({}) from checkpoint ' 'with different dimensions than current model: {}'.format(head_name, k) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] # When finetuning on translation task, remove last row of # embedding matrix that corresponds to mask_idx token. loaded_dict_size = state_dict['encoder.embed_tokens.weight'].size(0) if loaded_dict_size == len(self.encoder.dictionary) + 1 and '<mask>' not in self.encoder.dictionary: state_dict['encoder.embed_tokens.weight'] = state_dict['encoder.embed_tokens.weight'][:loaded_dict_size-1, :] state_dict['decoder.embed_tokens.weight'] = state_dict['decoder.embed_tokens.weight'][:loaded_dict_size-1, :] # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, 'classification_heads'): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + 'classification_heads.' + k not in state_dict: print('Overwriting', prefix + 'classification_heads.' + k) state_dict[prefix + 'classification_heads.' 
+ k] = v class BARTClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, features, **kwargs): x = features x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x @register_model_architecture('bart', 'bart_large') def bart_large_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4*1024) args.encoder_layers = getattr(args, 'encoder_layers', 12) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 12) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.relu_dropout = getattr(args, 'relu_dropout', 0.) args.dropout = getattr(args, 'dropout', 0.1) args.max_target_positions = getattr(args, 'max_target_positions', 1024) args.max_source_positions = getattr(args, 'max_source_positions', 1024) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True) args.share_all_embeddings = getattr(args, 'share_all_embeddings', True) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) args.no_scale_embedding = getattr(args, 'no_scale_embedding', True) args.layernorm_embedding = getattr(args, 'layernorm_embedding', True) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/bart/model.py
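In `BARTModel.forward` above, the classification path pools the decoder features at `</s>` positions and keeps the representation of the final `</s>` in each sequence. A standalone sketch of that indexing with made-up token ids and feature values; it assumes every sequence contains the same number of `</s>` tokens, which the `view` call requires:

import torch

eos = 2
src_tokens = torch.tensor([[0, 11, 12, 2, 13, 2],
                           [0, 21, 2, 22, 23, 2]])
x = torch.arange(2 * 6 * 4, dtype=torch.float).view(2, 6, 4)   # (batch, seq, dim)

eos_features = x[src_tokens.eq(eos), :]             # features at all </s> positions
sentence_representation = eos_features.view(x.size(0), -1, x.size(-1))[:, -1, :]
print(sentence_representation.shape)                # torch.Size([2, 4])
# Each row ends up holding the feature vector of that sequence's last </s> token.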
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import copy import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from typing import List from fairseq import utils from fairseq.data import encoders class BARTHubInterface(nn.Module): """A simple PyTorch Hub interface to BART. Usage: https://github.com/pytorch/fairseq/tree/master/examples/BART """ def __init__(self, args, task, model): super().__init__() self.args = args self.task = task self.model = model self.bpe = encoders.build_bpe(args) self.max_positions = min(utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), )) # this is useful for determining the device self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def encode(self, sentence: str, *addl_sentences, no_separator=True) -> torch.LongTensor: """ BPE-encode a sentence (or multiple sentences). Every sequence begins with a beginning-of-sentence (`<s>`) symbol. Every sentence ends with an end-of-sentence (`</s>`). Example (single sentence): `<s> a b c </s>` Example (sentence pair): `<s> d e f </s> 1 2 3 </s>` The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE requires leading spaces. For example:: >>> bart.encode('Hello world').tolist() [0, 31414, 232, 2] >>> bart.encode(' world').tolist() [0, 232, 2] >>> bart.encode('world').tolist() [0, 8331, 2] """ tokens = self.bpe.encode(sentence) if len(tokens.split(' ')) > self.max_positions - 2: tokens = ' '.join(tokens.split(' ')[:self.max_positions - 2]) bpe_sentence = '<s> ' + tokens + ' </s>' for s in addl_sentences: bpe_sentence += (' </s>' if not no_separator else '') bpe_sentence += ' ' + self.bpe.encode(s) + ' </s>' tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False) return tokens.long() def decode(self, tokens: torch.LongTensor): assert tokens.dim() == 1 tokens = tokens.cpu().numpy() if tokens[0] == self.task.source_dictionary.bos(): tokens = tokens[1:] # remove <s> eos_mask = (tokens == self.task.source_dictionary.eos()) doc_mask = eos_mask[1:] & eos_mask[:-1] sentences = np.split(tokens, doc_mask.nonzero()[0] + 1) sentences = [self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences] if len(sentences) == 1: return sentences[0] return sentences def _build_sample(self, src_tokens: List[torch.LongTensor]): # assert torch.is_tensor(src_tokens) dataset = self.task.build_dataset_for_inference( src_tokens, [x.numel() for x in src_tokens], ) sample = dataset.collater(dataset) sample = utils.apply_to_sample( lambda tensor: tensor.to(self.device), sample ) return sample def sample(self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs) -> str: input = [self.encode(sentence) for sentence in sentences] hypos = self.generate(input, beam, verbose, **kwargs) return [self.decode(x['tokens']) for x in hypos] def generate(self, tokens: List[torch.LongTensor], beam: int = 5, verbose: bool = False, **kwargs) -> torch.LongTensor: sample = self._build_sample(tokens) # build generator using current args as well as any kwargs gen_args = copy.copy(self.args) gen_args.beam = beam for k, v in kwargs.items(): setattr(gen_args, k, v) generator = self.task.build_generator(gen_args) translations = self.task.inference_step( generator, [self.model], sample, 
prefix_tokens=sample['net_input']['src_tokens'].new_zeros((len(tokens), 1)).fill_(self.task.source_dictionary.bos()), ) if verbose: src_str_with_unk = self.string(tokens) print('S\t{}'.format(src_str_with_unk)) def getarg(name, default): return getattr(gen_args, name, getattr(self.args, name, default)) # Process top predictions hypos = [x[0] for x in translations] hypos = [v for _, v in sorted(zip(sample['id'].tolist(), hypos))] return hypos def extract_features(self, tokens: torch.LongTensor, return_all_hiddens: bool = False) -> torch.Tensor: if tokens.dim() == 1: tokens = tokens.unsqueeze(0) if tokens.size(-1) > min(self.model.max_positions()): raise ValueError('tokens exceeds maximum length: {} > {}'.format( tokens.size(-1), self.model.max_positions() )) tokens.to(device=self.device), prev_output_tokens = tokens.clone() prev_output_tokens[:, 0] = tokens.gather( 1, (tokens.ne(self.task.source_dictionary.pad()).sum(dim=1)- 1).unsqueeze(-1), ).squeeze() prev_output_tokens[:, 1:] = tokens[:, :-1] features, extra = self.model( src_tokens=tokens, src_lengths=None, prev_output_tokens=prev_output_tokens, features_only=True, return_all_hiddens=return_all_hiddens, ) if return_all_hiddens: # convert from T x B x C -> B x T x C inner_states = extra['inner_states'] return [inner_state.transpose(0, 1) for inner_state in inner_states] else: return features # just the last layer's features def register_classification_head( self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs ): self.model.register_classification_head( name, num_classes=num_classes, embedding_size=embedding_size, **kwargs ) def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False): if tokens.dim() == 1: tokens = tokens.unsqueeze(0) features = self.extract_features(tokens.to(device=self.device)) sentence_representation = features[ tokens.eq(self.task.source_dictionary.eos()), : ].view(features.size(0), -1, features.size(-1))[:, -1, :] logits = self.model.classification_heads[head](sentence_representation) if return_logits: return logits return F.log_softmax(logits, dim=-1)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/bart/hub_interface.py
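In `extract_features` above, `prev_output_tokens` is built by rotating each sequence's last non-pad token (the `</s>`) to position 0 and shifting everything else right by one, which gives the decoder a right-shifted copy of its own input. A self-contained sketch of that shift with made-up ids (`pad=1`, `</s>=2`):

import torch

pad = 1
tokens = torch.tensor([[0, 10, 11, 12, 2],        # <s> a b c </s>
                       [0, 20, 21, 2, pad]])      # shorter sequence, right-padded

prev_output_tokens = tokens.clone()
last_non_pad = (tokens.ne(pad).sum(dim=1) - 1).unsqueeze(-1)   # index of </s> per row
prev_output_tokens[:, 0] = tokens.gather(1, last_non_pad).squeeze(-1)
prev_output_tokens[:, 1:] = tokens[:, :-1]
print(prev_output_tokens)
# tensor([[ 2,  0, 10, 11, 12],
#         [ 2,  0, 20, 21,  2]])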
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import Counter from typing import List import torch def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]): """ Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy). Args: roberta (RobertaHubInterface): RoBERTa instance bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)` other_tokens (List[str]): other tokens of shape `(T_words)` Returns: List[str]: mapping from *other_tokens* to corresponding *bpe_tokens*. """ assert bpe_tokens.dim() == 1 assert bpe_tokens[0] == 0 def clean(text): return text.strip() # remove whitespaces to simplify alignment bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens] bpe_tokens = [clean(roberta.bpe.decode(x) if x not in {'<s>', ''} else x) for x in bpe_tokens] other_tokens = [clean(str(o)) for o in other_tokens] # strip leading <s> bpe_tokens = bpe_tokens[1:] assert ''.join(bpe_tokens) == ''.join(other_tokens) # create alignment from every word to a list of BPE tokens alignment = [] bpe_toks = filter(lambda item: item[1] != '', enumerate(bpe_tokens, start=1)) j, bpe_tok = next(bpe_toks) for other_tok in other_tokens: bpe_indices = [] while True: if other_tok.startswith(bpe_tok): bpe_indices.append(j) other_tok = other_tok[len(bpe_tok):] try: j, bpe_tok = next(bpe_toks) except StopIteration: j, bpe_tok = None, None elif bpe_tok.startswith(other_tok): # other_tok spans multiple BPE tokens bpe_indices.append(j) bpe_tok = bpe_tok[len(other_tok):] other_tok = '' else: raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok)) if other_tok == '': break assert len(bpe_indices) > 0 alignment.append(bpe_indices) assert len(alignment) == len(other_tokens) return alignment def align_features_to_words(roberta, features, alignment): """ Align given features to words. Args: roberta (RobertaHubInterface): RoBERTa instance features (torch.Tensor): features to align of shape `(T_bpe x C)` alignment: alignment between BPE tokens and words returned by func:`align_bpe_to_words`. """ assert features.dim() == 2 bpe_counts = Counter(j for bpe_indices in alignment for j in bpe_indices) assert bpe_counts[0] == 0 # <s> shouldn't be aligned denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))]) weighted_features = features / denom.unsqueeze(-1) output = [weighted_features[0]] largest_j = -1 for bpe_indices in alignment: output.append(weighted_features[bpe_indices].sum(dim=0)) largest_j = max(largest_j, *bpe_indices) for j in range(largest_j + 1, len(features)): output.append(weighted_features[j]) output = torch.stack(output) assert torch.all(torch.abs(output.sum(dim=0) - features.sum(dim=0)) < 1e-4) return output def spacy_nlp(): if getattr(spacy_nlp, '_nlp', None) is None: try: from spacy.lang.en import English spacy_nlp._nlp = English() except ImportError: raise ImportError('Please install spacy with: pip install spacy') return spacy_nlp._nlp def spacy_tokenizer(): if getattr(spacy_tokenizer, '_tokenizer', None) is None: try: nlp = spacy_nlp() spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp) except ImportError: raise ImportError('Please install spacy with: pip install spacy') return spacy_tokenizer._tokenizer
data2vec_vision-main
infoxlm/fairseq/fairseq/models/roberta/alignment_utils.py
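The greedy loop in `align_bpe_to_words` above consumes BPE pieces until each word's characters are exhausted. A tiny standalone demo of the same idea, simplified to the common case where every word is a concatenation of whole pieces (plain strings instead of dictionary ids, so this is a sketch rather than the full algorithm, which also handles a piece spanning several words):

def align_pieces_to_words(pieces, words):
    alignment, j = [], 0
    for word in words:
        remaining, indices = word, []
        while remaining:
            piece = pieces[j]
            assert remaining.startswith(piece), \
                'cannot align "{}" with piece "{}"'.format(remaining, piece)
            indices.append(j)
            remaining = remaining[len(piece):]
            j += 1
        alignment.append(indices)
    return alignment

print(align_pieces_to_words(['un', 'believ', 'able', '!'], ['unbelievable', '!']))
# [[0, 1, 2], [3]]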
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .hub_interface import * # noqa from .model import * # noqa
data2vec_vision-main
infoxlm/fairseq/fairseq/models/roberta/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ RoBERTa: A Robustly Optimized BERT Pretraining Approach. """ import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqDecoder, FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.modules import ( LayerNorm, TransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from .hub_interface import RobertaHubInterface @register_model('roberta') class RobertaModel(FairseqLanguageModel): @classmethod def hub_models(cls): return { 'roberta.base': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.base.tar.gz', 'roberta.large': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.tar.gz', 'roberta.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.mnli.tar.gz', 'roberta.large.wsc': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.wsc.tar.gz', } def __init__(self, args, encoder): super().__init__(encoder) self.args = args # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--encoder-layers', type=int, metavar='L', help='num encoder layers') parser.add_argument('--encoder-embed-dim', type=int, metavar='H', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='F', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-attention-heads', type=int, metavar='A', help='num encoder attention heads') parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use for pooler layer') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', type=float, metavar='D', help='dropout probability after activation in FFN') parser.add_argument('--pooler-dropout', type=float, metavar='D', help='dropout probability in the masked_lm pooler layers') parser.add_argument('--max-positions', type=int, help='number of positional embeddings to learn') parser.add_argument('--load-checkpoint-heads', action='store_true', help='(re-)register and load heads when loading checkpoints') # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for encoder') parser.add_argument('--encoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list') @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present base_architecture(args) if not hasattr(args, 'max_positions'): args.max_positions = args.tokens_per_sample encoder = RobertaEncoder(args, task.source_dictionary) return cls(args, encoder) def forward(self, 
src_tokens, features_only=False, return_all_hiddens=False, classification_head_name=None, **kwargs): if classification_head_name is not None: features_only = True x, extra = self.decoder(src_tokens, features_only, return_all_hiddens, **kwargs) if classification_head_name is not None: x = self.classification_heads[classification_head_name](x) return x, extra def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs): """Register a classification head.""" if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: print( 'WARNING: re-registering head "{}" with num_classes {} (prev: {}) ' 'and inner_dim {} (prev: {})'.format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = RobertaClassificationHead( self.args.encoder_embed_dim, inner_dim or self.args.encoder_embed_dim, num_classes, self.args.pooler_activation_fn, self.args.pooler_dropout, ) @property def supported_targets(self): return {'self'} @classmethod def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='gpt2', **kwargs): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return RobertaHubInterface(x['args'], x['task'], x['models'][0]) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + '.' if name != '' else '' current_head_names = [] if not hasattr(self, 'classification_heads') else \ self.classification_heads.keys() # Handle new classification heads present in the state dict. keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + 'classification_heads.'): continue head_name = k[len(prefix + 'classification_heads.'):].split('.')[0] num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0) inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0) if getattr(self.args, 'load_checkpoint_heads', False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: print( 'WARNING: deleting classification head ({}) from checkpoint ' 'not present in current model: {}'.format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): print( 'WARNING: deleting classification head ({}) from checkpoint ' 'with different dimensions than current model: {}'.format(head_name, k) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, 'classification_heads'): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + 'classification_heads.' + k not in state_dict: print('Overwriting', prefix + 'classification_heads.' + k) state_dict[prefix + 'classification_heads.' 
+ k] = v @register_model('xlmr') class XLMRModel(RobertaModel): @classmethod def hub_models(cls): return { 'xlmr.base.v0': 'http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.v0.tar.gz', 'xlmr.large.v0': 'http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.v0.tar.gz', } @classmethod def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='sentencepiece', **kwargs): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return RobertaHubInterface(x['args'], x['task'], x['models'][0]) @register_model('camembert') class CamembertModel(RobertaModel): @classmethod def hub_models(cls): return { 'camembert.v0': 'http://dl.fbaipublicfiles.com/fairseq/models/camembert.v0.tar.gz', } @classmethod def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='sentencepiece', **kwargs): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return RobertaHubInterface(x['args'], x['task'], x['models'][0]) class RobertaLMHead(nn.Module): """Head for masked language modeling.""" def __init__(self, embed_dim, output_dim, activation_fn, weight=None): super().__init__() self.dense = nn.Linear(embed_dim, embed_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.layer_norm = LayerNorm(embed_dim) if weight is None: weight = nn.Linear(embed_dim, output_dim, bias=False).weight self.weight = weight self.bias = nn.Parameter(torch.zeros(output_dim)) def forward(self, features, masked_tokens=None, **kwargs): # Only project the unmasked tokens while training, # saves both memory and computation if masked_tokens is not None: features = features[masked_tokens, :] x = self.dense(features) x = self.activation_fn(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = F.linear(x, self.weight) + self.bias return x class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x class RobertaEncoder(FairseqDecoder): """RoBERTa encoder. Implements the :class:`~fairseq.models.FairseqDecoder` interface required by :class:`~fairseq.models.FairseqLanguageModel`. """ def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args # RoBERTa is a sentence encoder model, so users will intuitively trim # encoder layers. However, the implementation uses the fairseq decoder, # so we fix here. 
if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) args.decoder_layers_to_keep = args.encoder_layers_to_keep args.encoder_layers_to_keep = None self.sentence_encoder = TransformerSentenceEncoder( padding_idx=dictionary.pad(), vocab_size=len(dictionary), num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, layerdrop=args.encoder_layerdrop, max_seq_len=args.max_positions, num_segments=0, encoder_normalize_before=True, apply_bert_init=True, activation_fn=args.activation_fn, ) self.lm_head = RobertaLMHead( embed_dim=args.encoder_embed_dim, output_dim=len(dictionary), activation_fn=args.activation_fn, weight=self.sentence_encoder.embed_tokens.weight, ) def forward(self, src_tokens, features_only=False, return_all_hiddens=False, masked_tokens=None, **unused): """ Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` features_only (bool, optional): skip LM head and just return features. If True, the output will be of shape `(batch, src_len, embed_dim)`. return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). Returns: tuple: - the LM output of shape `(batch, src_len, vocab)` - a dictionary of additional data, where 'inner_states' is a list of hidden states. """ x, extra = self.extract_features(src_tokens, return_all_hiddens=return_all_hiddens) if not features_only: x = self.output_layer(x, masked_tokens=masked_tokens) return x, extra def extract_features(self, src_tokens, return_all_hiddens=False, **unused): inner_states, _ = self.sentence_encoder( src_tokens, last_state_only=not return_all_hiddens, ) features = inner_states[-1] return features, {'inner_states': inner_states if return_all_hiddens else None} def output_layer(self, features, masked_tokens=None, **unused): return self.lm_head(features, masked_tokens) def max_positions(self): """Maximum output length supported by the encoder.""" return self.args.max_positions @register_model_architecture('roberta', 'roberta') def base_architecture(args): args.encoder_layers = getattr(args, 'encoder_layers', 12) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.0) args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0) args.encoder_layers_to_keep = getattr(args, 'encoder_layers_to_keep', None) args.encoder_layerdrop = getattr(args, 'encoder_layerdrop', 0.0) @register_model_architecture('roberta', 'roberta_base') def roberta_base_architecture(args): base_architecture(args) @register_model_architecture('roberta', 'roberta_large') def roberta_large_architecture(args): args.encoder_layers = getattr(args, 'encoder_layers', 24) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) 
base_architecture(args) @register_model_architecture('roberta', 'xlm') def xlm_architecture(args): args.encoder_layers = getattr(args, 'encoder_layers', 16) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1280) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1280*4) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) base_architecture(args)
data2vec_vision-main
infoxlm/fairseq/fairseq/models/roberta/model.py
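The registry hooks above make `roberta` loadable through fairseq's hub utilities. A minimal usage sketch, assuming this checkout is importable and a local extraction of the `roberta.base` archive; the path and head name are placeholders, and the freshly registered head is untrained, so only the shapes are meaningful:

from fairseq.models.roberta import RobertaModel

roberta = RobertaModel.from_pretrained(
    '/path/to/roberta.base',      # hypothetical local extraction of roberta.base.tar.gz
    checkpoint_file='model.pt',
)
roberta.eval()

# Attach a two-class head; this builds a RobertaClassificationHead over the
# <s> (CLS-equivalent) position. The head is freshly initialized, i.e. untrained.
roberta.register_classification_head('sentence_cls', num_classes=2)

tokens = roberta.encode('Hello world!')            # 1-D LongTensor of BPE ids
logits = roberta.predict('sentence_cls', tokens)   # log-probabilities over the 2 classes
print(logits.shape)                                # torch.Size([1, 2])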
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data import encoders class RobertaHubInterface(nn.Module): """A simple PyTorch Hub interface to RoBERTa. Usage: https://github.com/pytorch/fairseq/tree/master/examples/roberta """ def __init__(self, args, task, model): super().__init__() self.args = args self.task = task self.model = model self.bpe = encoders.build_bpe(args) # this is useful for determining the device self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def encode(self, sentence: str, *addl_sentences, no_separator=False) -> torch.LongTensor: """ BPE-encode a sentence (or multiple sentences). Every sequence begins with a beginning-of-sentence (`<s>`) symbol. Every sentence ends with an end-of-sentence (`</s>`) and we use an extra end-of-sentence (`</s>`) as a separator. Example (single sentence): `<s> a b c </s>` Example (sentence pair): `<s> d e f </s> </s> 1 2 3 </s>` The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE requires leading spaces. For example:: >>> roberta.encode('Hello world').tolist() [0, 31414, 232, 2] >>> roberta.encode(' world').tolist() [0, 232, 2] >>> roberta.encode('world').tolist() [0, 8331, 2] """ bpe_sentence = '<s> ' + self.bpe.encode(sentence) + ' </s>' for s in addl_sentences: bpe_sentence += (' </s>' if not no_separator else '') bpe_sentence += ' ' + self.bpe.encode(s) + ' </s>' tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False, add_if_not_exist=False) return tokens.long() def decode(self, tokens: torch.LongTensor): assert tokens.dim() == 1 tokens = tokens.numpy() if tokens[0] == self.task.source_dictionary.bos(): tokens = tokens[1:] # remove <s> eos_mask = (tokens == self.task.source_dictionary.eos()) doc_mask = eos_mask[1:] & eos_mask[:-1] sentences = np.split(tokens, doc_mask.nonzero()[0] + 1) sentences = [self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences] if len(sentences) == 1: return sentences[0] return sentences def extract_features(self, tokens: torch.LongTensor, return_all_hiddens: bool = False) -> torch.Tensor: if tokens.dim() == 1: tokens = tokens.unsqueeze(0) if tokens.size(-1) > self.model.max_positions(): raise ValueError('tokens exceeds maximum length: {} > {}'.format( tokens.size(-1), self.model.max_positions() )) features, extra = self.model( tokens.to(device=self.device), features_only=True, return_all_hiddens=return_all_hiddens, ) if return_all_hiddens: # convert from T x B x C -> B x T x C inner_states = extra['inner_states'] return [inner_state.transpose(0, 1) for inner_state in inner_states] else: return features # just the last layer's features def register_classification_head( self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs ): self.model.register_classification_head( name, num_classes=num_classes, embedding_size=embedding_size, **kwargs ) def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False): features = self.extract_features(tokens.to(device=self.device)) logits = self.model.classification_heads[head](features) if return_logits: return logits return F.log_softmax(logits, dim=-1) def extract_features_aligned_to_words(self, sentence: str, return_all_hiddens: 
bool = False) -> torch.Tensor: """Extract RoBERTa features, aligned to spaCy's word-level tokenizer.""" from fairseq.models.roberta import alignment_utils from spacy.tokens import Doc nlp = alignment_utils.spacy_nlp() tokenizer = alignment_utils.spacy_tokenizer() # tokenize both with GPT-2 BPE and spaCy bpe_toks = self.encode(sentence) spacy_toks = tokenizer(sentence) spacy_toks_ws = [t.text_with_ws for t in tokenizer(sentence)] alignment = alignment_utils.align_bpe_to_words(self, bpe_toks, spacy_toks_ws) # extract features and align them features = self.extract_features(bpe_toks, return_all_hiddens=return_all_hiddens) features = features.squeeze(0) aligned_feats = alignment_utils.align_features_to_words(self, features, alignment) # wrap in spaCy Doc doc = Doc( nlp.vocab, words=['<s>'] + [x.text for x in spacy_toks] + ['</s>'], spaces=[True] + [x.endswith(' ') for x in spacy_toks_ws[:-1]] + [True, False], ) assert len(doc) == aligned_feats.size(0) doc.user_token_hooks['vector'] = lambda token: aligned_feats[token.i] return doc def fill_mask(self, masked_input: str, topk: int = 5): masked_token = '<mask>' assert masked_token in masked_input and masked_input.count(masked_token) == 1, \ "Please add one {0} token for the input, eg: 'He is a {0} guy'".format(masked_token) text_spans = masked_input.split(masked_token) text_spans_bpe = (' {0} '.format(masked_token)).join( [self.bpe.encode(text_span.rstrip()) for text_span in text_spans] ).strip() tokens = self.task.source_dictionary.encode_line( '<s> ' + text_spans_bpe + ' </s>', append_eos=False, add_if_not_exist=False, ) masked_index = (tokens == self.task.mask_idx).nonzero() if tokens.dim() == 1: tokens = tokens.unsqueeze(0) with utils.eval(self.model): features, extra = self.model( tokens.long().to(device=self.device), features_only=False, return_all_hiddens=False, ) logits = features[0, masked_index, :].squeeze() prob = logits.softmax(dim=0) values, index = prob.topk(k=topk, dim=0) topk_predicted_token_bpe = self.task.source_dictionary.string(index) topk_filled_outputs = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')): predicted_token = self.bpe.decode(predicted_token_bpe) # Quick hack to fix https://github.com/pytorch/fairseq/issues/1306 if predicted_token_bpe.startswith('\u2581'): predicted_token = ' ' + predicted_token if " {0}".format(masked_token) in masked_input: topk_filled_outputs.append(( masked_input.replace( ' {0}'.format(masked_token), predicted_token ), values[index].item(), predicted_token, )) else: topk_filled_outputs.append(( masked_input.replace(masked_token, predicted_token), values[index].item(), predicted_token, )) return topk_filled_outputs def disambiguate_pronoun(self, sentence: str) -> bool: """ Usage:: >>> disambiguate_pronoun('The _trophy_ would not fit in the brown suitcase because [it] was too big.') True >>> disambiguate_pronoun('The trophy would not fit in the brown suitcase because [it] was too big.') 'The trophy' """ assert hasattr(self.task, 'disambiguate_pronoun'), \ 'roberta.disambiguate_pronoun() requires a model trained with the WSC task.' with utils.eval(self.model): return self.task.disambiguate_pronoun(self.model, sentence, use_cuda=self.device.type == 'cuda')
data2vec_vision-main
infoxlm/fairseq/fairseq/models/roberta/hub_interface.py
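The encode/decode conventions and the fill_mask contract documented above are easiest to see end to end. A small sketch under the same assumptions as before (local `roberta.base` download; sentences are illustrative):

from fairseq.models.roberta import RobertaModel

roberta = RobertaModel.from_pretrained('/path/to/roberta.base', checkpoint_file='model.pt')
roberta.eval()

# Single sentence: <s> ... </s>; a sentence pair gets an extra </s> separator.
pair = roberta.encode('Hello world', 'How are you?')
print(pair.tolist()[:3])      # starts with the <s> id (0)
print(roberta.decode(pair))   # ['Hello world', 'How are you?']

# Top-3 completions for the single <mask> token.
for filled, score, token in roberta.fill_mask('The capital of France is <mask>.', topk=3):
    print(round(score, 3), filled)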
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn


class BeamableMM(nn.Module):
    """This module provides an optimized MM for beam decoding with attention.

    It leverages the fact that the source-side of the input is replicated beam
    times and the target-side of the input is of width one. This layer speeds up
    inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)}
    with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}.
    """
    def __init__(self, beam_size=None):
        super(BeamableMM, self).__init__()
        self.beam_size = beam_size

    def forward(self, input1, input2):
        if (
            not self.training and           # test mode
            self.beam_size is not None and  # beam size is set
            input1.dim() == 3 and           # only support batched input
            input1.size(1) == 1             # single time step update
        ):
            bsz, beam = input1.size(0), self.beam_size

            # bsz x 1 x nhu --> bsz/beam x beam x nhu
            input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)

            # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu
            input2 = input2.unfold(0, beam, beam)[:, :, :, 0]

            # use non batched operation if bsz = beam
            if input1.size(0) == 1:
                output = torch.mm(input1[0, :, :], input2[0, :, :])
            else:
                output = input1.bmm(input2)
            return output.view(bsz, 1, -1)
        else:
            return input1.bmm(input2)

    def set_beam_size(self, beam_size):
        self.beam_size = beam_size
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/beamable_mm.py
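The reshaping trick in BeamableMM only pays off when the source side really is replicated beam times per sentence. A quick numerical check, assuming this checkout is on the import path; sizes are arbitrary, and the second operand is laid out so that a plain bmm(input1, input2) is valid:

import torch

from fairseq.modules.beamable_mm import BeamableMM

sentences, beam, src_len, nhu = 3, 5, 7, 16
bsz = sentences * beam

input1 = torch.randn(bsz, 1, nhu)             # target side: one step per hypothesis
src = torch.randn(sentences, nhu, src_len)
input2 = src.repeat_interleave(beam, dim=0)   # source side replicated beam times per sentence

mm = BeamableMM(beam_size=beam).eval()        # the fast path only runs in eval mode
out_fast = mm(input1, input2)
out_ref = input1.bmm(input2)
print(out_fast.shape)                                 # torch.Size([15, 1, 7])
print(torch.allclose(out_fast, out_ref, atol=1e-5))   # True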
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq.modules.scalar_bias import scalar_bias class SingleHeadAttention(nn.Module): """ Single-head attention that supports Gating and Downsampling """ def __init__( self, out_channels, embed_dim, head_dim, head_index, dropout=0., bias=True, project_input=True, gated=False, downsample=False, num_heads=1, ): super().__init__() self.embed_dim = embed_dim self.dropout = dropout self.head_index = head_index self.head_dim = head_dim self.project_input = project_input self.gated = gated self.downsample = downsample self.num_heads = num_heads self.projection = None k_layers = [] v_layers = [] if self.downsample: k_layers.append(Downsample(self.head_index)) v_layers.append(Downsample(self.head_index)) out_proj_size = self.head_dim else: out_proj_size = self.head_dim * self.num_heads if self.gated: k_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_q = GatedLinear(self.embed_dim, out_proj_size, bias=bias) v_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias)) else: k_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_q = Linear(self.embed_dim, out_proj_size, bias=bias) v_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_k = nn.Sequential(*k_layers) self.in_proj_v = nn.Sequential(*v_layers) if self.downsample: self.out_proj = Linear(out_proj_size, self.head_dim, bias=bias) else: self.out_proj = Linear(out_proj_size, out_channels, bias=bias) self.scaling = self.head_dim**-0.5 def forward( self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, use_scalar_bias=False, ): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Future timesteps can be masked with the `mask_future_timesteps` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" src_len, bsz, out_channels = key.size() tgt_len = query.size(0) assert list(query.size()) == [tgt_len, bsz, out_channels] assert key.size() == value.size() if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.downsample: size = bsz else: size = bsz * self.num_heads k = key v = value q = query if self.project_input: q = self.in_proj_q(q) k = self.in_proj_k(k) v = self.in_proj_v(v) src_len = k.size()[0] q *= self.scaling if not self.downsample: q = q.view(tgt_len, size, self.head_dim) k = k.view(src_len, size, self.head_dim) v = v.view(src_len, size, self.head_dim) q = q.transpose(0, 1) k = k.transpose(0, 1) v = v.transpose(0, 1) attn_weights = torch.bmm(q, k.transpose(1, 2)) if mask_future_timesteps: assert query.size() == key.size(), \ 'mask_future_timesteps only applies to self-attention' attn_weights *= torch.tril( attn_weights.data.new([1]).expand(tgt_len, tgt_len).clone(), diagonal=-1, )[:, ::self.head_index + 1 if self.downsample else 1].unsqueeze(0) attn_weights += torch.triu( attn_weights.data.new([-math.inf]).expand(tgt_len, tgt_len).clone(), diagonal=0 )[:, ::self.head_index + 1 if self.downsample else 1].unsqueeze(0) tgt_size = tgt_len if use_scalar_bias: attn_weights = scalar_bias(attn_weights, 2) v = scalar_bias(v, 1) tgt_size += 1 if key_padding_mask is not None: # don't attend to padding symbols if key_padding_mask.max() > 0: if self.downsample: attn_weights = attn_weights.view(bsz, 1, tgt_len, src_len) else: attn_weights = attn_weights.view(size, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), -math.inf, ) attn_weights = attn_weights.view(size, tgt_len, src_len) attn_weights = F.softmax(attn_weights, dim=-1) attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training) attn = torch.bmm(attn_weights, v) if self.downsample: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.head_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim) attn = self.out_proj(attn) return attn, attn_weights class DownsampledMultiHeadAttention(nn.ModuleList): """ Multi-headed attention with Gating and Downsampling """ def __init__( self, out_channels, embed_dim, num_heads, dropout=0., bias=True, project_input=True, gated=False, downsample=False, ): self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.downsample = downsample self.gated = gated self.project_input = project_input assert self.head_dim * num_heads == embed_dim if self.downsample: attention_heads = [] for index in range(self.num_heads): attention_heads.append( SingleHeadAttention( out_channels, self.embed_dim, self.head_dim, index, self.dropout, bias, self.project_input, self.gated, self.downsample, self.num_heads, ) ) super().__init__(modules=attention_heads) self.out_proj = Linear(embed_dim, out_channels, bias=bias) else: # either we have a list of attention heads, or just one attention head # if not being downsampled, we can do the heads with one linear layer instead of separate ones super().__init__() self.attention_module = SingleHeadAttention( out_channels, self.embed_dim, self.head_dim, 1, self.dropout, bias, self.project_input, self.gated, self.downsample, self.num_heads, ) def forward( self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, use_scalar_bias=False, ): src_len, bsz, embed_dim = key.size() tgt_len = query.size(0) assert 
embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] assert key.size() == value.size() tgt_size = tgt_len if use_scalar_bias: tgt_size += 1 attn = [] attn_weights = [] if self.downsample: for attention_head_number in range(self.num_heads): # call the forward of each attention head _attn, _attn_weight = self[attention_head_number]( query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias, ) attn.append(_attn) attn_weights.append(_attn_weight) full_attn = torch.cat(attn, dim=2) full_attn = self.out_proj(full_attn) return full_attn, attn_weights[0].clone() else: _attn, _attn_weight = self.attention_module( query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias, ) attn.append(_attn) attn_weights.append(_attn_weight) full_attn = torch.cat(attn, dim=2) full_attn_weights = torch.cat(attn_weights) full_attn_weights = full_attn_weights.view(bsz, self.num_heads, tgt_size, src_len) full_attn_weights = full_attn_weights.sum(dim=1) / self.num_heads return full_attn, full_attn_weights class Downsample(nn.Module): """ Selects every nth element, where n is the index """ def __init__(self, index): super().__init__() self.index = index def forward(self, x): return x[::self.index+1] def Linear(in_features, out_features, dropout=0., bias=True): """Weight-normalized Linear layer (input: B x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features)) m.bias.data.zero_() return nn.utils.weight_norm(m) def GatedLinear(in_features, out_features, dropout=0., bias=True): """Weight-normalized Linear layer (input: B x T x C) with interspersed GLU units""" return nn.Sequential( Linear(in_features, out_features*4, dropout, bias), nn.GLU(), Linear(out_features*2, out_features*2, dropout, bias), nn.GLU(), Linear(out_features, out_features, dropout, bias) )
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/downsampled_multihead_attention.py
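A quick shape sketch of the two helpers defined alongside the attention classes above, Downsample and GatedLinear; it assumes this checkout is importable and uses arbitrary sizes:

import torch

from fairseq.modules.downsampled_multihead_attention import Downsample, GatedLinear

x = torch.randn(12, 4, 32)   # T x B x C

# Head index 2 keeps every third timestep along the time axis.
print(Downsample(index=2)(x).shape)   # torch.Size([4, 4, 32])

# GatedLinear stacks weight-normalized Linears with GLUs; each GLU halves the
# channel dimension, so the net mapping is C -> out_features.
gl = GatedLinear(32, 16)
print(gl(x).shape)                    # torch.Size([12, 4, 16])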
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from torch import nn from torch.nn import Parameter import torch.nn.functional as F from fairseq import utils class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \ 'value to be of the same size' self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias) self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() self.onnx_trace = False self.enable_torch_version = False if hasattr(F, "multi_head_attention_forward"): self.enable_torch_version = True else: self.enable_torch_version = False def prepare_for_onnx_export_(self): self.onnx_trace = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1/math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1/math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1/math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) nn.init.constant_(self.out_proj.bias, 0.) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward( self, query, key, value, key_padding_mask=None, incremental_state=None, need_weights=True, static_kv=False, attn_mask=None, before_softmax=False, need_head_weights=False, ): """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. 
Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] if self.enable_torch_version and not self.onnx_trace and incremental_state is None and not static_kv: return F.multi_head_attention_forward(query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, self.training, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight) if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if 'prev_key' in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1) if key_padding_mask is not None: key_padding_mask = torch.cat( [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1) q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) if k is not None: k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) if v is not None: v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if 'prev_key' in saved_state: prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: k = torch.cat((prev_key, k), dim=1) if 'prev_value' in saved_state: prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: v = torch.cat((prev_value, v), dim=1) key_padding_mask = self._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=saved_state.get('prev_key_padding_mask', None), batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state['prev_key_padding_mask'] = key_padding_mask self._set_input_buffer(incremental_state, saved_state) src_len = k.size(1) # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. 
if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]): key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1) if key_padding_mask is not None: key_padding_mask = torch.cat( [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf'), ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v attn_weights_float = utils.softmax(attn_weights, dim=-1, onnx_trace=self.onnx_trace) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if (self.onnx_trace and attn.size(1) == 1): # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = attn.contiguous().view(tgt_len, bsz, embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) if need_weights: attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) else: attn_weights = None return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask, prev_key_padding_mask, batch_size, src_len, static_kv, ): # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1)).bool() if prev_key_padding_mask.is_cuda: filler = filler.cuda() key_padding_mask = torch.cat((prev_key_padding_mask, filler), dim=1) elif key_padding_mask is not None: filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1)).bool() if key_padding_mask.is_cuda: filler = filler.cuda() key_padding_mask = torch.cat((filler, key_padding_mask), dim=1) return key_padding_mask def reorder_incremental_state(self, incremental_state, new_order): """Reorder buffered internal state (for incremental generation).""" input_buffer = 
self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): if input_buffer[k] is not None: input_buffer[k] = input_buffer[k].index_select(0, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state( self, incremental_state, 'attn_state', ) or {} def _set_input_buffer(self, incremental_state, buffer): utils.set_incremental_state( self, incremental_state, 'attn_state', buffer, ) def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + '.' if name != '' else '' items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + 'in_proj_weight'): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + 'q_proj.weight'] = state_dict[k][:dim] items_to_add[prefix + 'k_proj.weight'] = state_dict[k][dim:2*dim] items_to_add[prefix + 'v_proj.weight'] = state_dict[k][2*dim:] keys_to_remove.append(k) k_bias = prefix + 'in_proj_bias' if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + 'q_proj.bias'] = state_dict[k_bias][:dim] items_to_add[prefix + 'k_proj.bias'] = state_dict[k_bias][dim:2*dim] items_to_add[prefix + 'v_proj.bias'] = state_dict[k_bias][2*dim:] keys_to_remove.append(prefix + 'in_proj_bias') for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/multihead_attention.py
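Self-attention with the Time x Batch x Channel convention, as a quick sanity check; a minimal sketch with illustrative sizes, assuming this fairseq checkout is importable:

import torch

from fairseq.modules.multihead_attention import MultiheadAttention

tgt_len, bsz, embed_dim = 6, 2, 16
attn = MultiheadAttention(embed_dim, num_heads=4, self_attention=True)
attn.eval()

x = torch.randn(tgt_len, bsz, embed_dim)
key_padding_mask = torch.zeros(bsz, tgt_len, dtype=torch.bool)
key_padding_mask[1, -2:] = True   # mark the last two steps of sentence 2 as padding

out, weights = attn(query=x, key=x, value=x,
                    key_padding_mask=key_padding_mask,
                    need_weights=True)
print(out.shape)       # torch.Size([6, 2, 16])
print(weights.shape)   # torch.Size([2, 6, 6]): attention averaged over the 4 heads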
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import absolute_import, division, print_function, unicode_literals

from collections.abc import Iterable
from itertools import repeat

import torch
import torch.nn as nn


def _pair(v):
    if isinstance(v, Iterable):
        assert len(v) == 2, "len(v) != 2"
        return v
    return tuple(repeat(v, 2))


def infer_conv_output_dim(conv_op, input_dim, sample_inchannel):
    sample_seq_len = 200
    sample_bsz = 10
    x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim)
    # N x C x H x W
    # N: sample_bsz, C: sample_inchannel, H: sample_seq_len, W: input_dim
    x = conv_op(x)
    # N x C x H x W
    x = x.transpose(1, 2)
    # N x H x C x W
    bsz, seq = x.size()[:2]
    per_channel_dim = x.size()[3]
    # bsz: N, seq: H, CxW the rest
    return x.contiguous().view(bsz, seq, -1).size(-1), per_channel_dim


class VGGBlock(torch.nn.Module):
    """
    VGG motivated CNN module https://arxiv.org/pdf/1409.1556.pdf

    Args:
        in_channels: (int) number of input channels (typically 1)
        out_channels: (int) number of output channels
        conv_kernel_size: convolution kernel size
        pooling_kernel_size: the size of the pooling window to take a max over
        num_conv_layers: (int) number of convolution layers
        input_dim: (int) input dimension
        conv_stride: the stride of the convolving kernel.
            Can be a single number or a tuple (sH, sW)  Default: 1
        padding: implicit paddings on both sides of the input.
            Can be a single number or a tuple (padH, padW). Default: None
        layer_norm: (bool) if layer norm is going to be applied. Default: False

    Shape:
        Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
        Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        conv_kernel_size,
        pooling_kernel_size,
        num_conv_layers,
        input_dim,
        conv_stride=1,
        padding=None,
        layer_norm=False,
    ):
        assert (
            input_dim is not None
        ), "Need input_dim for LayerNorm and infer_conv_output_dim"
        super(VGGBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv_kernel_size = _pair(conv_kernel_size)
        self.pooling_kernel_size = _pair(pooling_kernel_size)
        self.num_conv_layers = num_conv_layers
        self.padding = (
            tuple(e // 2 for e in self.conv_kernel_size)
            if padding is None
            else _pair(padding)
        )
        self.conv_stride = _pair(conv_stride)

        self.layers = nn.ModuleList()
        for layer in range(num_conv_layers):
            conv_op = nn.Conv2d(
                in_channels if layer == 0 else out_channels,
                out_channels,
                self.conv_kernel_size,
                stride=self.conv_stride,
                padding=self.padding,
            )
            self.layers.append(conv_op)
            if layer_norm:
                conv_output_dim, per_channel_dim = infer_conv_output_dim(
                    conv_op, input_dim, in_channels if layer == 0 else out_channels
                )
                self.layers.append(nn.LayerNorm(per_channel_dim))
                input_dim = per_channel_dim
            self.layers.append(nn.ReLU())

        if self.pooling_kernel_size is not None:
            pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True)
            self.layers.append(pool_op)
            self.total_output_dim, self.output_dim = infer_conv_output_dim(
                pool_op, input_dim, out_channels
            )

    def forward(self, x):
        for i, _ in enumerate(self.layers):
            x = self.layers[i](x)
        return x
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/vggblock.py
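A shape-level sketch of a VGGBlock over (batch, channels, time, features) input, per the docstring above; the dimensions are illustrative:

import torch

from fairseq.modules.vggblock import VGGBlock

block = VGGBlock(
    in_channels=1, out_channels=32,
    conv_kernel_size=3, pooling_kernel_size=2,
    num_conv_layers=2, input_dim=40, layer_norm=True,
)
x = torch.randn(8, 1, 100, 40)   # B x C x T x feat
print(block(x).shape)            # torch.Size([8, 32, 50, 20]) after the 2x2 max-pool
print(block.output_dim)          # 20: per-channel feature dim after pooling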
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn.functional as F


class MeanPoolGatingNetwork(torch.nn.Module):
    """A simple mean-pooling gating network for selecting experts.

    This module applies mean pooling over an encoder's output and returns
    responsibilities for each expert. The encoder format is expected to match
    :class:`fairseq.models.transformer.TransformerEncoder`.
    """

    def __init__(self, embed_dim, num_experts, dropout=None):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_experts = num_experts

        self.fc1 = torch.nn.Linear(embed_dim, embed_dim)
        self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None
        self.fc2 = torch.nn.Linear(embed_dim, num_experts)

    def forward(self, encoder_out):
        if not (
            hasattr(encoder_out, 'encoder_out')
            and hasattr(encoder_out, 'encoder_padding_mask')
            and encoder_out.encoder_out.size(2) == self.embed_dim
        ):
            raise ValueError('Unexpected format for encoder_out')

        # mean pooling over time
        encoder_padding_mask = encoder_out.encoder_padding_mask  # B x T
        encoder_out = encoder_out.encoder_out.transpose(0, 1)    # B x T x C
        if encoder_padding_mask is not None:
            encoder_out = encoder_out.clone()  # required because of transpose above
            encoder_out[encoder_padding_mask] = 0
            ntokens = torch.sum(~encoder_padding_mask, dim=1, keepdim=True)
            x = torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out)
        else:
            x = torch.mean(encoder_out, dim=1)

        x = torch.tanh(self.fc1(x))
        if self.dropout is not None:
            x = self.dropout(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=-1, dtype=torch.float32).type_as(x)
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/mean_pool_gating_network.py
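The masked mean-pooling step is the interesting part of the gating network. A sketch that feeds it a stand-in namedtuple carrying only the two attributes the module reads (EncoderOut here is a local stand-in, not the fairseq type; sizes are illustrative):

from collections import namedtuple

import torch

from fairseq.modules.mean_pool_gating_network import MeanPoolGatingNetwork

EncoderOut = namedtuple('EncoderOut', ['encoder_out', 'encoder_padding_mask'])

T, B, C, num_experts = 7, 3, 16, 4
mask = torch.zeros(B, T, dtype=torch.bool)
mask[0, -2:] = True   # last two timesteps of the first sentence are padding

gate = MeanPoolGatingNetwork(embed_dim=C, num_experts=num_experts, dropout=0.1)
log_probs = gate(EncoderOut(encoder_out=torch.randn(T, B, C), encoder_padding_mask=mask))
print(log_probs.shape)              # torch.Size([3, 4])
print(log_probs.exp().sum(dim=1))   # each row sums to ~1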
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn

from fairseq import utils


class LearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    Padding ids are ignored by either offsetting based on padding_idx
    or by setting padding_idx to None and ensuring that the appropriate
    position ids are passed to the forward function.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: int,
    ):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.onnx_trace = False

    def forward(self, input, incremental_state=None, positions=None):
        """Input is expected to be of size [bsz x seqlen]."""
        assert (
            (positions is None) or (self.padding_idx is None)
        ), "If positions is pre-computed then padding_idx should not be set."

        if positions is None:
            if incremental_state is not None:
                # positions is the same for every token when decoding a single step
                # Without the int() cast, it doesn't work in some cases when exporting to ONNX
                positions = input.data.new(1, 1).fill_(int(self.padding_idx + input.size(1)))
            else:
                positions = utils.make_positions(
                    input, self.padding_idx, onnx_trace=self.onnx_trace,
                )
        return super().forward(positions)

    def max_positions(self):
        """Maximum number of supported positions."""
        if self.padding_idx is not None:
            return self.num_embeddings - self.padding_idx - 1
        else:
            return self.num_embeddings
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/learned_positional_embedding.py
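Positions for real tokens count up from padding_idx + 1 (via utils.make_positions), while pads keep the padding embedding. A small sketch with illustrative ids, assuming this checkout is importable:

import torch

from fairseq import utils
from fairseq.modules.learned_positional_embedding import LearnedPositionalEmbedding

padding_idx = 1
emb = LearnedPositionalEmbedding(num_embeddings=16, embedding_dim=8, padding_idx=padding_idx)
print(emb.max_positions())   # 16 - 1 - 1 = 14

tokens = torch.tensor([[5, 6, 7, padding_idx, padding_idx]])
print(utils.make_positions(tokens, padding_idx))
# expected: tensor([[2, 3, 4, 1, 1]]) -- real tokens count up from padding_idx + 1,
# pads keep padding_idx and therefore map to the zeroed padding embedding
print(emb(tokens).shape)     # torch.Size([1, 5, 8])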
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch


class GradMultiply(torch.autograd.Function):

    @staticmethod
    def forward(ctx, x, scale):
        ctx.scale = scale
        res = x.new(x)
        return res

    @staticmethod
    def backward(ctx, grad):
        return grad * ctx.scale, None
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/grad_multiply.py
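GradMultiply is an identity in the forward pass and scales the gradient on the way back; a two-line check:

import torch

from fairseq.modules.grad_multiply import GradMultiply

x = torch.ones(3, requires_grad=True)
y = GradMultiply.apply(x, 0.1)
print(torch.equal(x, y))   # True: the forward pass is a plain copy
y.sum().backward()
print(x.grad)              # tensor([0.1000, 0.1000, 0.1000])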
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from torch import nn from typing import List, Tuple from .highway import Highway from fairseq.data import Dictionary CHAR_PAD_IDX = 0 CHAR_EOS_IDX = 257 class CharacterTokenEmbedder(torch.nn.Module): def __init__( self, vocab: Dictionary, filters: List[Tuple[int, int]], char_embed_dim: int, word_embed_dim: int, highway_layers: int, max_char_len: int = 50, char_inputs: bool = False ): super(CharacterTokenEmbedder, self).__init__() self.onnx_trace = False self.embedding_dim = word_embed_dim self.max_char_len = max_char_len self.char_embeddings = nn.Embedding(257, char_embed_dim, padding_idx=0) self.symbol_embeddings = nn.Parameter(torch.FloatTensor(2, word_embed_dim)) self.eos_idx, self.unk_idx = 0, 1 self.char_inputs = char_inputs self.convolutions = nn.ModuleList() for width, out_c in filters: self.convolutions.append( nn.Conv1d(char_embed_dim, out_c, kernel_size=width) ) last_dim = sum(f[1] for f in filters) self.highway = Highway(last_dim, highway_layers) if highway_layers > 0 else None self.projection = nn.Linear(last_dim, word_embed_dim) assert vocab is not None or char_inputs, "vocab must be set if not using char inputs" self.vocab = None if vocab is not None: self.set_vocab(vocab, max_char_len) self.reset_parameters() def prepare_for_onnx_export_(self): self.onnx_trace = True def set_vocab(self, vocab, max_char_len): word_to_char = torch.LongTensor(len(vocab), max_char_len) truncated = 0 for i in range(len(vocab)): if i < vocab.nspecial: char_idxs = [0] * max_char_len else: chars = vocab[i].encode() # +1 for padding char_idxs = [c + 1 for c in chars] + [0] * (max_char_len - len(chars)) if len(char_idxs) > max_char_len: truncated += 1 char_idxs = char_idxs[:max_char_len] word_to_char[i] = torch.LongTensor(char_idxs) if truncated > 0: print('Truncated {} words longer than {} characters'.format(truncated, max_char_len)) self.vocab = vocab self.word_to_char = word_to_char @property def padding_idx(self): return Dictionary().pad() if self.vocab is None else self.vocab.pad() def reset_parameters(self): nn.init.xavier_normal_(self.char_embeddings.weight) nn.init.xavier_normal_(self.symbol_embeddings) nn.init.xavier_uniform_(self.projection.weight) nn.init.constant_(self.char_embeddings.weight[self.char_embeddings.padding_idx], 0.) nn.init.constant_(self.projection.bias, 0.) 
def forward( self, input: torch.Tensor, ): if self.char_inputs: chars = input.view(-1, self.max_char_len) pads = chars[:, 0].eq(CHAR_PAD_IDX) eos = chars[:, 0].eq(CHAR_EOS_IDX) if eos.any(): if self.onnx_trace: chars = torch.where(eos.unsqueeze(1), chars.new_zeros(1), chars) else: chars[eos] = 0 unk = None else: flat_words = input.view(-1) chars = self.word_to_char[flat_words.type_as(self.word_to_char)].type_as(input) pads = flat_words.eq(self.vocab.pad()) eos = flat_words.eq(self.vocab.eos()) unk = flat_words.eq(self.vocab.unk()) word_embs = self._convolve(chars) if self.onnx_trace: if pads.any(): word_embs = torch.where(pads.unsqueeze(1), word_embs.new_zeros(1), word_embs) if eos.any(): word_embs = torch.where(eos.unsqueeze(1), self.symbol_embeddings[self.eos_idx], word_embs) if unk is not None and unk.any(): word_embs = torch.where(unk.unsqueeze(1), self.symbol_embeddings[self.unk_idx], word_embs) else: if pads.any(): word_embs[pads] = 0 if eos.any(): word_embs[eos] = self.symbol_embeddings[self.eos_idx] if unk is not None and unk.any(): word_embs[unk] = self.symbol_embeddings[self.unk_idx] return word_embs.view(input.size()[:2] + (-1,)) def _convolve( self, char_idxs: torch.Tensor, ): char_embs = self.char_embeddings(char_idxs) char_embs = char_embs.transpose(1, 2) # BTC -> BCT conv_result = [] for i, conv in enumerate(self.convolutions): x = conv(char_embs) x, _ = torch.max(x, -1) x = F.relu(x) conv_result.append(x) x = torch.cat(conv_result, dim=-1) if self.highway is not None: x = self.highway(x) x = self.projection(x) return x
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/character_token_embedder.py
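A sketch of the character-CNN path with raw character inputs (char ids are byte value + 1, 0 is the character pad); the filter sizes and dimensions here are illustrative, and the helper `to_char_ids` is hypothetical:

import torch

from fairseq.modules.character_token_embedder import CharacterTokenEmbedder

embedder = CharacterTokenEmbedder(
    vocab=None, filters=[(1, 8), (2, 8), (3, 16)],
    char_embed_dim=4, word_embed_dim=32, highway_layers=1,
    max_char_len=10, char_inputs=True,
)

def to_char_ids(word, max_len=10):
    ids = [b + 1 for b in word.encode()][:max_len]
    return ids + [0] * (max_len - len(ids))

batch = torch.tensor([[to_char_ids('hello'), to_char_ids('world')]])   # B x T x max_char_len
print(embedder(batch).shape)   # torch.Size([1, 2, 32])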
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.modules import TransformerSentenceEncoderLayer
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention


class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
    """
    Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention)
    """

    def __init__(
        self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        activation_fn: str = 'relu',
        add_bias_kv: bool = False,
        add_zero_attn: bool = False,
        export: bool = False,
        is_bidirectional: bool = True,
        stride: int = 32,
        expressivity: int = 8,
    ) -> None:
        super().__init__(
            embedding_dim, ffn_embedding_dim, num_attention_heads, dropout,
            attention_dropout, activation_dropout, activation_fn, add_bias_kv,
            add_zero_attn, export
        )

        self.self_attn = SparseMultiheadAttention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            self_attention=True,
            is_bidirectional=is_bidirectional,
            stride=stride,
            expressivity=expressivity,
        )
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/sparse_transformer_sentence_encoder_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import operator import functools import torch import torch.nn.functional as F from torch import nn class TiedLinear(nn.Module): def __init__(self, weight, transpose): super().__init__() self.weight = weight self.transpose = transpose def forward(self, input): return F.linear(input, self.weight.t() if self.transpose else self.weight) class TiedHeadModule(nn.Module): def __init__(self, weights, input_dim, num_classes): super().__init__() tied_emb, _ = weights self.num_words, emb_dim = tied_emb.size() self.word_proj = TiedLinear(tied_emb, transpose=False) if input_dim != emb_dim: self.word_proj = nn.Sequential( nn.Linear(input_dim, emb_dim, bias=False), self.word_proj, ) self.class_proj = nn.Linear(input_dim, num_classes, bias=False) self.out_dim = self.num_words + num_classes self.register_buffer('_float_tensor', torch.FloatTensor(1)) def forward(self, input): inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1) out = self._float_tensor.new(inp_sz, self.out_dim) out[:, :self.num_words] = self.word_proj(input.view(inp_sz, -1)) out[:, self.num_words:] = self.class_proj(input.view(inp_sz, -1)) return out class AdaptiveSoftmax(nn.Module): """ This is an implementation of the efficient softmax approximation for graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs" (http://arxiv.org/abs/1609.04309). """ def __init__(self, vocab_size, input_dim, cutoff, dropout, factor=4., adaptive_inputs=None, tie_proj=False): super().__init__() if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] else: assert vocab_size == cutoff[ -1], 'cannot specify cutoff larger than vocab size' output_dim = cutoff[0] + len(cutoff) - 1 self.vocab_size = vocab_size self.cutoff = cutoff self.dropout = dropout self.input_dim = input_dim self.factor = factor self.lsm = nn.LogSoftmax(dim=1) if adaptive_inputs is not None: self.head = TiedHeadModule(adaptive_inputs.weights_for_band(0), input_dim, len(cutoff) - 1) else: self.head = nn.Linear(input_dim, output_dim, bias=False) self._make_tail(adaptive_inputs, tie_proj) def init_weights(m): if hasattr(m, 'weight') and not isinstance(m, TiedLinear) and not isinstance(m, TiedHeadModule): nn.init.xavier_uniform_(m.weight) self.apply(init_weights) self.register_buffer('version', torch.LongTensor([1])) def _make_tail(self, adaptive_inputs=None, tie_proj=False): self.tail = nn.ModuleList() for i in range(len(self.cutoff) - 1): dim = int(self.input_dim // self.factor ** (i + 1)) tied_emb, tied_proj = adaptive_inputs.weights_for_band(i + 1) \ if adaptive_inputs is not None else (None, None) if tied_proj is not None: if tie_proj: proj = TiedLinear(tied_proj, transpose=True) else: proj = nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False) else: proj = nn.Linear(self.input_dim, dim, bias=False) m = nn.Sequential( proj, nn.Dropout(self.dropout), nn.Linear( dim, self.cutoff[i + 1] - self.cutoff[i], bias=False, ) if tied_emb is None else TiedLinear(tied_emb, transpose=False), ) self.tail.append(m) def upgrade_state_dict_named(self, state_dict, name): version_name = name + '.version' if version_name not in state_dict: raise Exception('This version of the model is no longer supported') def adapt_target(self, target): """ In order to be efficient, the AdaptiveSoftMax does not compute the scores for all the word of the vocabulary for all the examples. 
It is thus necessary to call the method adapt_target of the AdaptiveSoftMax layer inside each forward pass. """ target = target.view(-1) new_target = [target.clone()] target_idxs = [] for i in range(len(self.cutoff) - 1): mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1])) new_target[0][mask] = self.cutoff[0] + i if mask.any(): target_idxs.append(mask.nonzero().squeeze(1)) new_target.append(target[mask].add(-self.cutoff[i])) else: target_idxs.append(None) new_target.append(None) return new_target, target_idxs def forward(self, input, target): """ Args: input: (b x t x d) target: (b x t) Returns: 2 lists: output for each cutoff section and new targets by cut off """ input = input.contiguous().view(-1, input.size(-1)) input = F.dropout(input, p=self.dropout, training=self.training) new_target, target_idxs = self.adapt_target(target) output = [self.head(input)] for i in range(len(target_idxs)): if target_idxs[i] is not None: output.append(self.tail[i](input.index_select(0, target_idxs[i]))) else: output.append(None) return output, new_target def get_log_prob(self, input, target): """ Computes the log probabilities for all the words of the vocabulary, given a 2D tensor of hidden vectors. """ bsz, length, dim = input.size() input = input.contiguous().view(-1, dim) if target is not None: _, target_idxs = self.adapt_target(target) else: target_idxs = None head_y = self.head(input) log_probs = head_y.new_zeros(input.size(0), self.vocab_size) head_sz = self.cutoff[0] + len(self.tail) log_probs[:, :head_sz] = self.lsm(head_y) tail_priors = log_probs[:, self.cutoff[0]: head_sz].clone() for i in range(len(self.tail)): start = self.cutoff[i] end = self.cutoff[i + 1] if target_idxs is None: tail_out = log_probs[:, start:end] tail_out.copy_(self.tail[i](input)) log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None]) elif target_idxs[i] is not None: idxs = target_idxs[i] tail_out = log_probs[idxs, start:end] tail_out.copy_(self.tail[i](input[idxs])) log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None]) log_probs = log_probs.view(bsz, length, -1) return log_probs
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/adaptive_softmax.py
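A minimal usage sketch for the AdaptiveSoftmax module above (not part of the original file); it assumes this fairseq checkout is importable, and the vocabulary size, cutoffs and tensor shapes below are made up for illustration.

import torch
from fairseq.modules import AdaptiveSoftmax

vocab_size, input_dim = 1000, 64
# head covers ids [0, 200); two tail clusters cover [200, 600) and [600, 1000)
adaptive_softmax = AdaptiveSoftmax(vocab_size, input_dim, cutoff=[200, 600], dropout=0.1)

hidden = torch.randn(2, 7, input_dim)             # B x T x D decoder states
target = torch.randint(0, vocab_size, (2, 7))     # B x T gold token ids

outputs, new_targets = adaptive_softmax(hidden, target)   # per-cluster logits and remapped targets
log_probs = adaptive_softmax.get_log_prob(hidden, target) # B x T x vocab_size
print(log_probs.shape)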
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch.nn.modules.utils import _single class ConvTBC(torch.nn.Module): """1D convolution over an input of shape (time x batch x channel) The implementation uses gemm to perform the convolution. This implementation is faster than cuDNN for small kernel sizes. """ def __init__(self, in_channels, out_channels, kernel_size, padding=0): super(ConvTBC, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _single(kernel_size) self.padding = _single(padding) self.weight = torch.nn.Parameter(torch.Tensor( self.kernel_size[0], in_channels, out_channels)) self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) def forward(self, input): return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0]) def __repr__(self): s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}' ', padding={padding}') if self.bias is None: s += ', bias=False' s += ')' return s.format(name=self.__class__.__name__, **self.__dict__)
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/conv_tbc.py
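A small shape-check sketch for ConvTBC (illustrative only, with made-up sizes, assuming fairseq is installed); the module defines no reset_parameters, so the example initializes the weights explicitly.

import torch
from fairseq.modules import ConvTBC

conv = ConvTBC(in_channels=16, out_channels=32, kernel_size=3, padding=1)
torch.nn.init.xavier_uniform_(conv.weight)   # ConvTBC leaves its parameters uninitialized
torch.nn.init.zeros_(conv.bias)

x = torch.randn(20, 4, 16)                   # (time, batch, channel), unlike nn.Conv1d's (batch, channel, time)
y = conv(x)
print(y.shape)                               # torch.Size([20, 4, 32]) with kernel_size=3 and padding=1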
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn.functional as F


def unfold1d(x, kernel_size, padding_l, pad_value=0):
    '''unfold T x B x C to T x B x C x K'''
    if kernel_size > 1:
        T, B, C = x.size()
        x = F.pad(x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value)
        x = x.as_strided((T, B, C, kernel_size), (B*C, C, 1, B*C))
    else:
        x = x.unsqueeze(3)
    return x
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/unfold.py
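An illustrative sketch of unfold1d with made-up sizes, showing that padding_l = kernel_size - 1 yields causal windows whose last slot is the current timestep.

import torch
from fairseq.modules import unfold1d

T, B, C, K = 6, 2, 4, 3
x = torch.arange(T * B * C, dtype=torch.float).view(T, B, C)

windows = unfold1d(x, kernel_size=K, padding_l=K - 1, pad_value=0)
print(windows.shape)                      # torch.Size([6, 2, 4, 3])
print(torch.equal(windows[..., -1], x))   # True: slot K-1 of each window is the current timestep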
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with
the corresponding GitHub repo: https://github.com/hendrycks/GELUs
"""

import math

import torch


def gelu_accurate(x):
    if not hasattr(gelu_accurate, "_a"):
        gelu_accurate._a = math.sqrt(2 / math.pi)
    return 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))


def gelu(x: torch.Tensor) -> torch.Tensor:
    if hasattr(torch.nn.functional, 'gelu'):
        return torch.nn.functional.gelu(x.float()).type_as(x)
    else:
        return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/gelu.py
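A quick comparison sketch (illustrative, assuming fairseq is importable) of the exact and tanh-approximated GELU defined above.

import torch
from fairseq.modules import gelu, gelu_accurate

x = torch.linspace(-3, 3, steps=7)
print(gelu(x))                                    # erf-based GELU (dispatches to F.gelu when available)
print(gelu_accurate(x))                           # tanh approximation from the Hendrycks & Gimpel repo
print((gelu(x) - gelu_accurate(x)).abs().max())   # the two variants differ only slightly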
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq.modules import ( LayerNorm, MultiheadAttention, PositionalEmbedding, TransformerSentenceEncoderLayer, ) import random def init_bert_params(module): """ Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bais will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention initialized using the normal distribution (to be validated). """ if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=0.02) if module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=0.02) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, MultiheadAttention): module.q_proj.weight.data.normal_(mean=0.0, std=0.02) module.k_proj.weight.data.normal_(mean=0.0, std=0.02) module.v_proj.weight.data.normal_(mean=0.0, std=0.02) class TransformerSentenceEncoder(nn.Module): """ Implementation for a Bi-directional Transformer based Sentence Encoder used in BERT/XLM style pre-trained models. This first computes the token embedding using the token embedding matrix, position embeddings (if specified) and segment embeddings (if specified). After applying the specified number of TransformerEncoderLayers, it outputs all the internal states of the encoder as well as the final representation associated with the first token (usually CLS token). Input: - tokens: B x T matrix representing sentences - segment_labels: B x T matrix representing segment label for tokens Output: - a tuple of the following: - a list of internal model states used to compute the predictions where each tensor has shape B x T x C - sentence representation associated with first input token in format B x C. 
""" def __init__( self, padding_idx: int, vocab_size: int, num_encoder_layers: int = 6, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, layerdrop : float = 0.0, max_seq_len: int = 256, num_segments: int = 2, use_position_embeddings: bool = True, offset_positions_by_padding: bool = True, encoder_normalize_before: bool = False, apply_bert_init: bool = False, activation_fn: str = "relu", learned_pos_embedding: bool = True, add_bias_kv: bool = False, add_zero_attn: bool = False, embed_scale: float = None, freeze_embeddings: bool = False, n_trans_layers_to_freeze: int = 0, export: bool = False, traceable: bool = False, ) -> None: super().__init__() self.padding_idx = padding_idx self.vocab_size = vocab_size self.dropout = dropout self.layerdrop = layerdrop self.max_seq_len = max_seq_len self.embedding_dim = embedding_dim self.num_segments = num_segments self.use_position_embeddings = use_position_embeddings self.apply_bert_init = apply_bert_init self.learned_pos_embedding = learned_pos_embedding self.traceable = traceable self.embed_tokens = nn.Embedding( self.vocab_size, self.embedding_dim, self.padding_idx ) self.embed_scale = embed_scale self.segment_embeddings = ( nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None) if self.num_segments > 0 else None ) self.embed_positions = ( PositionalEmbedding( self.max_seq_len, self.embedding_dim, padding_idx=(self.padding_idx if offset_positions_by_padding else None), learned=self.learned_pos_embedding, ) if self.use_position_embeddings else None ) self.layers = nn.ModuleList( [ TransformerSentenceEncoderLayer( embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=self.dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, export=export, ) for _ in range(num_encoder_layers) ] ) if encoder_normalize_before: self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export) else: self.emb_layer_norm = None # Apply initialization of model params after building the model if self.apply_bert_init: self.apply(init_bert_params) def freeze_module_params(m): if m is not None: for p in m.parameters(): p.requires_grad = False if freeze_embeddings: freeze_module_params(self.embed_tokens) freeze_module_params(self.segment_embeddings) freeze_module_params(self.embed_positions) freeze_module_params(self.emb_layer_norm) for layer in range(n_trans_layers_to_freeze): freeze_module_params(self.layers[layer]) def forward( self, tokens: torch.Tensor, segment_labels: torch.Tensor = None, last_state_only: bool = False, positions: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: # compute padding mask. 
This is needed for multi-head attention padding_mask = tokens.eq(self.padding_idx) if not self.traceable and not padding_mask.any(): padding_mask = None x = self.embed_tokens(tokens) if self.embed_scale is not None: x *= self.embed_scale if self.embed_positions is not None: x += self.embed_positions(tokens, positions=positions) if self.segment_embeddings is not None and segment_labels is not None: x += self.segment_embeddings(segment_labels) if self.emb_layer_norm is not None: x = self.emb_layer_norm(x) x = F.dropout(x, p=self.dropout, training=self.training) # account for padding while computing the representation if padding_mask is not None: x *= 1 - padding_mask.unsqueeze(-1).type_as(x) # B x T x C -> T x B x C x = x.transpose(0, 1) inner_states = [] if not last_state_only: inner_states.append(x) for layer in self.layers: # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not self.training or (dropout_probability > self.layerdrop): x, _ = layer(x, self_attn_padding_mask=padding_mask) if not last_state_only: inner_states.append(x) # T x B x C -> B x T x C x = x.transpose(0, 1) sentence_rep = x[:, 0, :] if last_state_only: inner_states = [x] if self.traceable: return torch.stack(inner_states), sentence_rep else: return inner_states, sentence_rep
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/transformer_sentence_encoder.py
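A usage sketch for TransformerSentenceEncoder with made-up vocabulary and dimensions (illustrative, not part of the original file); passing apply_bert_init=True would additionally apply the init_bert_params initializer defined above.

import torch
from fairseq.modules import TransformerSentenceEncoder

encoder = TransformerSentenceEncoder(
    padding_idx=1,
    vocab_size=100,
    num_encoder_layers=2,
    embedding_dim=64,
    ffn_embedding_dim=128,
    num_attention_heads=4,
    max_seq_len=32,
)

tokens = torch.randint(2, 100, (3, 10))   # B x T token ids; 0 and 1 reserved here
tokens[:, -2:] = 1                        # mark the last two positions as padding
inner_states, sentence_rep = encoder(tokens)
print(len(inner_states))                  # num_encoder_layers + 1 states, each T x B x C
print(inner_states[-1].shape, sentence_rep.shape)   # (10, 3, 64) and (3, 64)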
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .adaptive_input import AdaptiveInput from .adaptive_softmax import AdaptiveSoftmax from .beamable_mm import BeamableMM from .character_token_embedder import CharacterTokenEmbedder from .conv_tbc import ConvTBC from .downsampled_multihead_attention import DownsampledMultiHeadAttention from .dynamic_convolution import DynamicConv, DynamicConv1dTBC from .gelu import gelu, gelu_accurate from .grad_multiply import GradMultiply from .highway import Highway from .layer_norm import LayerNorm from .learned_positional_embedding import LearnedPositionalEmbedding from .lightweight_convolution import LightweightConv, LightweightConv1dTBC from .linearized_convolution import LinearizedConvolution from .logsumexp_moe import LogSumExpMoE from .mean_pool_gating_network import MeanPoolGatingNetwork from .multihead_attention import MultiheadAttention from .positional_embedding import PositionalEmbedding from .scalar_bias import ScalarBias from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding from .transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer from .transformer_sentence_encoder import TransformerSentenceEncoder from .unfold import unfold1d from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer from .vggblock import VGGBlock __all__ = [ 'AdaptiveInput', 'AdaptiveSoftmax', 'BeamableMM', 'CharacterTokenEmbedder', 'ConvTBC', 'DownsampledMultiHeadAttention', 'DynamicConv1dTBC', 'DynamicConv', 'gelu', 'gelu_accurate', 'GradMultiply', 'Highway', 'LayerNorm', 'LearnedPositionalEmbedding', 'LightweightConv1dTBC', 'LightweightConv', 'LinearizedConvolution', 'LogSumExpMoE', 'MeanPoolGatingNetwork', 'MultiheadAttention', 'PositionalEmbedding', 'ScalarBias', 'SinusoidalPositionalEmbedding', 'TransformerSentenceEncoderLayer', 'TransformerSentenceEncoder', 'TransformerDecoderLayer', 'TransformerEncoderLayer', 'VGGBlock', 'unfold1d', ]
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn

from fairseq.modules import TransformerSentenceEncoder
from fairseq.modules.sparse_transformer_sentence_encoder_layer import SparseTransformerSentenceEncoderLayer


class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
    """
    Sparse implementation of the TransformerSentenceEncoder
    - see SparseMultiheadAttention
    """

    def __init__(
        self,
        padding_idx: int,
        vocab_size: int,
        num_encoder_layers: int = 6,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        max_seq_len: int = 256,
        num_segments: int = 2,
        use_position_embeddings: bool = True,
        offset_positions_by_padding: bool = True,
        encoder_normalize_before: bool = False,
        apply_bert_init: bool = False,
        activation_fn: str = "relu",
        learned_pos_embedding: bool = True,
        add_bias_kv: bool = False,
        add_zero_attn: bool = False,
        embed_scale: float = None,
        freeze_embeddings: bool = False,
        n_trans_layers_to_freeze: int = 0,
        export: bool = False,
        is_bidirectional: bool = True,
        stride: int = 32,
        expressivity: int = 8,
    ) -> None:

        # Forward the arguments by keyword: the parent __init__ has a
        # `layerdrop` parameter between `activation_dropout` and `max_seq_len`,
        # so passing these values positionally would bind them to the wrong
        # parameters (e.g. max_seq_len would become the layerdrop rate).
        super().__init__(
            padding_idx=padding_idx,
            vocab_size=vocab_size,
            num_encoder_layers=num_encoder_layers,
            embedding_dim=embedding_dim,
            ffn_embedding_dim=ffn_embedding_dim,
            num_attention_heads=num_attention_heads,
            dropout=dropout,
            attention_dropout=attention_dropout,
            activation_dropout=activation_dropout,
            max_seq_len=max_seq_len,
            num_segments=num_segments,
            use_position_embeddings=use_position_embeddings,
            offset_positions_by_padding=offset_positions_by_padding,
            encoder_normalize_before=encoder_normalize_before,
            apply_bert_init=apply_bert_init,
            activation_fn=activation_fn,
            learned_pos_embedding=learned_pos_embedding,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            embed_scale=embed_scale,
            freeze_embeddings=freeze_embeddings,
            n_trans_layers_to_freeze=n_trans_layers_to_freeze,
            export=export,
        )

        self.layers = nn.ModuleList(
            [
                SparseTransformerSentenceEncoderLayer(
                    embedding_dim=self.embedding_dim,
                    ffn_embedding_dim=ffn_embedding_dim,
                    num_attention_heads=num_attention_heads,
                    dropout=self.dropout,
                    attention_dropout=attention_dropout,
                    activation_dropout=activation_dropout,
                    activation_fn=activation_fn,
                    add_bias_kv=add_bias_kv,
                    add_zero_attn=add_zero_attn,
                    export=export,
                    is_bidirectional=is_bidirectional,
                    stride=stride,
                    expressivity=expressivity,
                )
                for _ in range(num_encoder_layers)
            ]
        )

        def freeze_module_params(m):
            if m is not None:
                for p in m.parameters():
                    p.requires_grad = False

        for layer in range(n_trans_layers_to_freeze):
            freeze_module_params(self.layers[layer])
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from .conv_tbc import ConvTBC class LinearizedConvolution(ConvTBC): """An optimized version of nn.Conv1d. At training time, this module uses ConvTBC, which is an optimized version of Conv1d. At inference time, it optimizes incremental generation (i.e., one time step at a time) by replacing the convolutions with linear layers. Note that the input order changes from training to inference. """ def __init__(self, in_channels, out_channels, kernel_size, **kwargs): super().__init__(in_channels, out_channels, kernel_size, **kwargs) self._linearized_weight = None self.register_backward_hook(self._clear_linearized_weight) def forward(self, input, incremental_state=None): """ Args: incremental_state: Used to buffer signal; if not None, then input is expected to contain a single frame. If the input order changes between time steps, call reorder_incremental_state. Input: Time x Batch x Channel during training Batch x Time x Channel during inference """ if incremental_state is None: output = super().forward(input) if self.kernel_size[0] > 1 and self.padding[0] > 0: # remove future timesteps added by padding output = output[:-self.padding[0], :, :] return output # reshape weight weight = self._get_linearized_weight() kw = self.kernel_size[0] bsz = input.size(0) # input: bsz x len x dim if kw > 1: input = input.data input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = input.new(bsz, kw, input.size(2)).zero_() self._set_input_buffer(incremental_state, input_buffer) else: # shift buffer input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone() # append next input input_buffer[:, -1, :] = input[:, -1, :] input = input_buffer with torch.no_grad(): output = F.linear(input.view(bsz, -1), weight, self.bias) return output.view(bsz, 1, -1) def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(0, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, 'input_buffer') def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer) def _get_linearized_weight(self): if self._linearized_weight is None: kw = self.kernel_size[0] weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous() assert weight.size() == (self.out_channels, kw, self.in_channels) self._linearized_weight = torch.nn.Parameter(weight.view(self.out_channels, -1)) return self._linearized_weight def _clear_linearized_weight(self, *args): self._linearized_weight = None
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/linearized_convolution.py
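An illustrative sketch of the two calling conventions of LinearizedConvolution (time-major during training, batch-major one frame at a time with an incremental state at inference); sizes are made up, and the parameters are initialized by hand since the module does not do it itself.

import torch
from fairseq.modules import LinearizedConvolution

conv = LinearizedConvolution(in_channels=8, out_channels=8, kernel_size=3, padding=2)
torch.nn.init.xavier_uniform_(conv.weight)
torch.nn.init.zeros_(conv.bias)

x_tbc = torch.randn(5, 2, 8)               # T x B x C
print(conv(x_tbc).shape)                   # torch.Size([5, 2, 8]); future steps added by padding are stripped

incremental_state = {}
for t in range(x_tbc.size(0)):
    frame = x_tbc[t].unsqueeze(1)          # B x 1 x C, one timestep at a time
    out = conv(frame, incremental_state)   # uses the linearized weight and the rolling input buffer
    print(out.shape)                       # torch.Size([2, 1, 8])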
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.modules import LayerNorm, MultiheadAttention class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, attn_mask=None): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where T_tgt is the length of query, while T_src is the length of key, though here both query and key is x here, attn_mask[t_tgt, t_src] = 1 means when calculating embedding for t_tgt, t_src is excluded (or masked out), =0 means it is included in attention Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if attn_mask is not None: attn_mask = attn_mask.masked_fill(attn_mask.bool(), -1e8) # anything in original attn_mask = 1, becomes -1e8 # anything in original attn_mask = 0, becomes 0 # Note that we cannot use -inf here, because at some edge cases, # the attention weight (before softmax) for some padded element in query # will become -inf, which results in NaN in model parameters # TODO: to formally solve this problem, we need to change fairseq's # MultiheadAttention. We will do this later on. 
x, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.cross_self_attention = getattr(args, 'cross_self_attention', False) self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=not self.cross_self_attention, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=getattr(args, 'encoder_embed_dim', None), vdim=getattr(args, 'encoder_embed_dim', None), dropout=args.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, need_attn=False, need_head_weights=False, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor, optional): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. need_attn (bool, optional): return attention weights need_head_weights (bool, optional): return attention weights for each head (default: return average over heads). Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ if need_head_weights: need_attn = True residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state[:2] saved_state = {"prev_key": prev_key, "prev_value": prev_value} if len(prev_self_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] self.self_attn._set_input_buffer(incremental_state, saved_state) if self.cross_self_attention and not (incremental_state is not None and "prev_key" in self.self_attn._get_input_buffer(incremental_state)): if self_attn_mask is not None: self_attn_mask = torch.cat((x.new(x.size(0), encoder_out.size(0)).zero_(), self_attn_mask), dim=1) if self_attn_padding_mask is not None: if encoder_padding_mask is None: encoder_padding_mask = self_attn_padding_mask.new(encoder_out.size(1), encoder_out.size(0)).zero_() self_attn_padding_mask = torch.cat((encoder_padding_mask, self_attn_padding_mask), dim=1) y = torch.cat((encoder_out, x), dim=0) else: y = x x, attn = self.self_attn( query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state[:2] saved_state = {"prev_key": prev_key, "prev_value": prev_value} if len(prev_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_attn_state[2] self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, 
static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) if self_attn_padding_mask is not None: self_attn_state = saved_state["prev_key"], saved_state["prev_value"], saved_state["prev_key_padding_mask"] else: self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) return m
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/transformer_layer.py
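A usage sketch for TransformerEncoderLayer; since the layer reads its hyper-parameters from an argparse-style namespace, the example builds a minimal one with made-up values (illustrative only, assuming fairseq is importable).

import torch
from argparse import Namespace
from fairseq.modules import TransformerEncoderLayer

args = Namespace(
    encoder_embed_dim=64,
    encoder_attention_heads=4,
    encoder_ffn_embed_dim=128,
    dropout=0.1,
    attention_dropout=0.0,
    activation_dropout=0.0,
    activation_fn='relu',
    encoder_normalize_before=False,
)
layer = TransformerEncoderLayer(args)

x = torch.randn(10, 2, 64)                                    # T x B x C
encoder_padding_mask = torch.zeros(2, 10, dtype=torch.bool)   # B x T, no padding in this toy batch
y = layer(x, encoder_padding_mask)
print(y.shape)                                                # torch.Size([10, 2, 64])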
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.modules import ( LayerNorm, MultiheadAttention, ) class TransformerSentenceEncoderLayer(nn.Module): """ Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained models. """ def __init__( self, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, activation_fn: str = 'relu', add_bias_kv: bool = False, add_zero_attn: bool = False, export: bool = False, ) -> None: super().__init__() # Initialize parameters self.embedding_dim = embedding_dim self.dropout = dropout self.activation_dropout = activation_dropout # Initialize blocks self.activation_fn = utils.get_activation_fn(activation_fn) self.self_attn = MultiheadAttention( self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) # layer norm associated with the self attention layer self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export) self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim) self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim) # layer norm associated with the position wise feed-forward NN self.final_layer_norm = LayerNorm(self.embedding_dim, export=export) def forward( self, x: torch.Tensor, self_attn_mask: torch.Tensor = None, self_attn_padding_mask: torch.Tensor = None, ): """ LayerNorm is applied either before or after the self-attention/ffn modules similar to the original Transformer imlementation. """ residual = x x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.final_layer_norm(x) return x, attn
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py
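A shape-check sketch for a single TransformerSentenceEncoderLayer with made-up dimensions (illustrative).

import torch
from fairseq.modules import TransformerSentenceEncoderLayer

layer = TransformerSentenceEncoderLayer(
    embedding_dim=64,
    ffn_embedding_dim=128,
    num_attention_heads=4,
)

x = torch.randn(10, 2, 64)   # T x B x C, as produced by the encoder that stacks these layers
out, attn = layer(x)
print(out.shape)             # torch.Size([10, 2, 64]); attn is None because need_weights=False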
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn class Highway(torch.nn.Module): """ A `Highway layer <https://arxiv.org/abs/1505.00387>`_. Adopted from the AllenNLP implementation. """ def __init__( self, input_dim: int, num_layers: int = 1 ): super(Highway, self).__init__() self.input_dim = input_dim self.layers = nn.ModuleList([nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)]) self.activation = nn.ReLU() self.reset_parameters() def reset_parameters(self): for layer in self.layers: # As per comment in AllenNLP: # We should bias the highway layer to just carry its input forward. We do that by # setting the bias on `B(x)` to be positive, because that means `g` will be biased to # be high, so we will carry the input forward. The bias on `B(x)` is the second half # of the bias vector in each Linear layer. nn.init.constant_(layer.bias[self.input_dim:], 1) nn.init.constant_(layer.bias[:self.input_dim], 0) nn.init.xavier_normal_(layer.weight) def forward( self, x: torch.Tensor ): for layer in self.layers: projection = layer(x) proj_x, gate = projection.chunk(2, dim=-1) proj_x = self.activation(proj_x) gate = torch.sigmoid(gate) x = gate * x + (gate.new_tensor([1]) - gate) * proj_x return x
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/highway.py
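A small illustrative sketch of the Highway module with made-up sizes; per the comment in reset_parameters, the gate bias makes the layer favour carrying its input through at initialization.

import torch
from fairseq.modules import Highway

highway = Highway(input_dim=32, num_layers=2)
x = torch.randn(4, 32)
y = highway(x)
print(y.shape)   # torch.Size([4, 32]); output keeps the input dimensionality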
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from .unfold import unfold1d def DynamicConv(input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0., weight_softmax=False, renorm_padding=False, bias=False, conv_bias=False, query_size=None, in_proj=False): if torch.cuda.is_available(): try: from fairseq.modules.dynamicconv_layer import DynamicconvLayer return DynamicconvLayer(input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, bias=bias) except ImportError as e: print(e) return DynamicConv1dTBC(input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, bias=bias) def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) return m class DynamicConv1dTBC(nn.Module): '''Dynamic lightweight convolution taking T x B x C inputs Args: input_size: # of channels of the input kernel_size: convolution channels padding_l: padding to the left when using "same" padding num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size) weight_dropout: the drop rate of the DropConnect to drop the weight weight_softmax: normalize the weight with softmax before the convolution renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1) bias: use bias conv_bias: bias of the convolution query_size: specified when feeding a different input as the query in_proj: project the input and generate the filter together Shape: Input: TxBxC, i.e. (timesteps, batch_size, input_size) Output: TxBxC, i.e. (timesteps, batch_size, input_size) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` ''' def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0., weight_softmax=False, renorm_padding=False, bias=False, conv_bias=False, query_size=None, in_proj=False): super().__init__() self.input_size = input_size self.query_size = input_size if query_size is None else query_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_dropout = weight_dropout self.weight_softmax = weight_softmax self.renorm_padding = renorm_padding if in_proj: self.weight_linear = Linear(self.input_size, self.input_size + num_heads * kernel_size * 1) else: self.weight_linear = Linear(self.query_size, num_heads * kernel_size * 1, bias=bias) if conv_bias: self.conv_bias = nn.Parameter(torch.Tensor(input_size)) else: self.conv_bias = None self.reset_parameters() @property def in_proj(self): return self.weight_linear.out_features == self.input_size + self.num_heads * self.kernel_size def reset_parameters(self): self.weight_linear.reset_parameters() if self.conv_bias is not None: nn.init.constant_(self.conv_bias, 0.) def forward(self, x, incremental_state=None, query=None, unfold=None): '''Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C args: x: Input of shape T x B x C, i.e. 
(timesteps, batch_size, input_size) incremental_state: A dict to keep the state unfold: unfold the input or not. If not, we use the matrix trick instead query: use the specified query to predict the conv filters ''' unfold = x.size(0) > 512 if unfold is None else unfold # use unfold mode as default for long sequence to save memory unfold = unfold or (incremental_state is not None) assert query is None or not self.in_proj if query is None: query = x if unfold: output = self._forward_unfolded(x, incremental_state, query) else: output = self._forward_expanded(x, incremental_state, query) if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output def _forward_unfolded(self, x, incremental_state, query): '''The conventional implementation of convolutions. Unfolding the input by having a window shifting to the right.''' T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = proj.narrow(2, self.input_size, H*K).contiguous().view(T*B*H, -1) else: weight = self.weight_linear(query).view(T*B*H, -1) # renorm_padding is only implemented in _forward_expanded assert not self.renorm_padding or incremental_state is not None if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:]) x_unfold = x_unfold.view(T*B*H, R, -1) else: padding_l = self.padding_l if K > T and padding_l == K-1: weight = weight.narrow(1, K-T, T) K, padding_l = T, T-1 # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, K, padding_l, 0) x_unfold = x_unfold.view(T*B*H, R, K) if self.weight_softmax and not self.renorm_padding: weight = F.softmax(weight, dim=1) weight = weight.narrow(1, 0, K) if incremental_state is not None: weight = weight[:, -x_unfold.size(2):] K = weight.size(1) if self.weight_softmax and self.renorm_padding: weight = F.softmax(weight, dim=1) weight = F.dropout(weight, self.weight_dropout, training=self.training, inplace=False) output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1 output = output.view(T, B, C) return output def _forward_expanded(self, x, incremental_stat, query): '''Turn the convolution filters into band matrices and do matrix multiplication. This is faster when the sequence is short, but less memory efficient. This is not used in the decoder during inference. 
''' T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = proj.narrow(2, self.input_size, H*K).contiguous().view(T*B*H, -1) else: weight = self.weight_linear(query).view(T*B*H, -1) if not self.renorm_padding: if self.weight_softmax: weight = F.softmax(weight, dim=1) weight = F.dropout(weight, self.weight_dropout, training=self.training, inplace=False) weight = weight.narrow(1, 0, K).contiguous() weight = weight.view(T, B*H, K).transpose(0, 1) x = x.view(T, B*H, R).transpose(0, 1) if self.weight_softmax and self.renorm_padding: # turn the convolution filters into band matrices weight_expanded = weight.new(B*H, T, T+K-1).fill_(float('-inf')) weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight) weight_expanded = weight_expanded.narrow(2, self.padding_l, T) # normalize the weight over valid positions like self-attention weight_expanded = F.softmax(weight_expanded, dim=2) weight_expanded = F.dropout(weight_expanded, self.weight_dropout, training=self.training, inplace=False) else: P = self.padding_l # For efficieny, we cut the kernel size and reduce the padding when the kernel is larger than the length if K > T and P == K-1: weight = weight.narrow(2, K-T, T) K, P = T, T-1 # turn the convolution filters into band matrices weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False) weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight) weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T output = torch.bmm(weight_expanded, x) output = output.transpose(0, 1).contiguous().view(T, B, C) return output def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, 'input_buffer') def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer) def extra_repr(self): s = '{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}'.format( self.input_size, self.kernel_size, self.padding_l, self.num_heads, self.weight_softmax, self.conv_bias is not None, self.renorm_padding, self.in_proj, ) if self.query_size != self.input_size: s += ', query_size={}'.format(self.query_size) if self.weight_dropout > 0.: s += ', weight_dropout={}'.format(self.weight_dropout) return s
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/dynamic_convolution.py
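An illustrative sketch for DynamicConv1dTBC with made-up sizes; with padding_l = kernel_size - 1 the convolution is causal, and the unfolded and band-matrix code paths compute the same result.

import torch
from fairseq.modules import DynamicConv1dTBC

conv = DynamicConv1dTBC(
    input_size=16, kernel_size=3, padding_l=2,
    num_heads=4, weight_softmax=True,
)

x = torch.randn(10, 2, 16)           # T x B x C
y_expanded = conv(x)                 # band-matrix path (default for short sequences)
y_unfolded = conv(x, unfold=True)    # unfold path (long sequences and incremental decoding)
print(y_expanded.shape)              # torch.Size([10, 2, 16])
print(torch.allclose(y_expanded, y_unfolded, atol=1e-6))   # True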
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from typing import List class AdaptiveInput(nn.Module): def __init__( self, vocab_size: int, padding_idx: int, initial_dim: int, factor: float, output_dim: int, cutoff: List[int], ): super().__init__() if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] else: assert vocab_size == cutoff[ -1], 'cannot specify cutoff larger than vocab size' self.cutoff = cutoff self.embedding_dim = output_dim self.padding_idx = padding_idx self.embeddings = nn.ModuleList() for i in range(len(self.cutoff)): prev = self.cutoff[i - 1] if i > 0 else 0 size = self.cutoff[i] - prev dim = int(initial_dim // (factor ** i)) seq = nn.Sequential( nn.Embedding(size, dim, padding_idx), nn.Linear(dim, output_dim, bias=False) ) self.embeddings.append(seq) def init_weights(m): if isinstance(m, nn.Embedding): nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) elif hasattr(m, 'weight'): nn.init.xavier_uniform_(m.weight) self.apply(init_weights) self.register_buffer('_float_tensor', torch.FloatTensor(1)) def weights_for_band(self, band: int): return self.embeddings[band][0].weight, self.embeddings[band][1].weight def forward(self, input: torch.Tensor): result = self._float_tensor.new(input.shape + (self.embedding_dim,)) for i in range(len(self.cutoff)): mask = input.lt(self.cutoff[i]) if i > 0: mask.mul_(input.ge(self.cutoff[i - 1])) chunk_input = input[mask] - self.cutoff[i - 1] else: chunk_input = input[mask] if mask.any(): result[mask] = self.embeddings[i](chunk_input) return result
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/adaptive_input.py
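A usage sketch for AdaptiveInput with made-up cutoffs; weights_for_band exposes the per-band embedding and projection matrices that AdaptiveSoftmax can tie to.

import torch
from fairseq.modules import AdaptiveInput

embed = AdaptiveInput(
    vocab_size=1000, padding_idx=0, initial_dim=64,
    factor=4., output_dim=64, cutoff=[200, 600],
)

tokens = torch.randint(0, 1000, (2, 7))
out = embed(tokens)
print(out.shape)                             # torch.Size([2, 7, 64]) whichever band each id falls in

emb_weight, proj_weight = embed.weights_for_band(0)
print(emb_weight.shape, proj_weight.shape)   # torch.Size([200, 64]) torch.Size([64, 64])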
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.modules.unfold import unfold1d def LightweightConv(input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0., weight_softmax=False, bias=False): if torch.cuda.is_available(): try: from fairseq.modules.lightconv_layer import LightconvLayer return LightconvLayer(input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, bias=bias) except ImportError as e: print(e) return LightweightConv1dTBC(input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, bias=bias) class LightweightConv1d(nn.Module): '''Lightweight Convolution assuming the input is BxCxT This is just an example that explains LightConv clearer than the TBC version. We don't use this module in the model. Args: input_size: # of channels of the input and output kernel_size: convolution channels padding: padding num_heads: number of heads used. The weight is of shape `(num_heads, 1, kernel_size)` weight_softmax: normalize the weight with softmax before the convolution Shape: Input: BxCxT, i.e. (batch_size, input_size, timesteps) Output: BxCxT, i.e. (batch_size, input_size, timesteps) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` ''' def __init__(self, input_size, kernel_size=1, padding=0, num_heads=1, weight_softmax=False, bias=False, weight_dropout=0.): super().__init__() self.input_size = input_size self.kernel_size = kernel_size self.num_heads = num_heads self.padding = padding self.weight_softmax = weight_softmax self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.weight_dropout = weight_dropout self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.) def forward(self, input): ''' input size: B x C x T output size: B x C x T ''' B, C, T = input.size() H = self.num_heads weight = self.weight if self.weight_softmax: weight = F.softmax(weight, dim=-1) weight = F.dropout(weight, self.weight_dropout, training=self.training) # Merge every C/H entries into the batch dimension (C = self.input_size) # B x C x T -> (B * C/H) x H x T # One can also expand the weight to C x 1 x K by a factor of C/H # and do not reshape the input instead, which is slow though input = input.view(-1, H, T) output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads) output = output.view(B, C, T) if self.bias is not None: output = output + self.bias.view(1, -1, 1) return output class LightweightConv1dTBC(nn.Module): '''Lightweight Convolution assuming the input is TxBxC Args: input_size: # of channels of the input kernel_size: convolution channels padding_l: padding to the left when using "same" padding num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size) weight_dropout: the drop rate of the DropConnect to drop the weight weight_softmax: normalize the weight with softmax before the convolution bias: use bias Shape: Input: TxBxC, i.e. 
(timesteps, batch_size, input_size) Output: TxBxC, i.e. (timesteps, batch_size, input_size) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` ''' def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0., weight_softmax=False, bias=False): super().__init__() self.input_size = input_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_dropout = weight_dropout self.weight_softmax = weight_softmax self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.reset_parameters() self.onnx_trace = False def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.) def forward(self, x, incremental_state=None, unfold=False): '''Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C args: x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) incremental_state: A dict to keep the state unfold: unfold the input or not. If not, we use the matrix trick instead ''' unfold = unfold or (incremental_state is not None) if unfold: output = self._forward_unfolded(x, incremental_state) else: output = self._forward_expanded(x, incremental_state) if self.bias is not None: output = output + self.bias.view(1, 1, -1) return output def prepare_for_onnx_export_(self): self.onnx_trace = True def _forward_unfolded(self, x, incremental_state): '''The conventional implementation of convolutions. Unfolding the input by having a window shifting to the right.''' T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight.view(H, K) if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:]) x_unfold = x_unfold.view(T*B*H, R, -1) else: # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0) x_unfold = x_unfold.view(T*B*H, R, K) if self.weight_softmax: weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(weight) if incremental_state is not None: weight = weight[:, -x_unfold.size(2):] K = weight.size(1) weight = weight.view(1, H, K).expand(T*B, H, K).contiguous().view(T*B*H, K, 1) weight = F.dropout(weight, self.weight_dropout, training=self.training) output = torch.bmm(x_unfold, weight) # T*B*H x R x 1 output = output.view(T, B, C) return output def _forward_expanded(self, x, incremental_state): '''Turn the convolution filters into band matrices and do matrix multiplication. This is faster when the sequence is short, but less memory efficient. This is not used in the decoder during inference. 
''' T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight.view(H, K) if self.weight_softmax: weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(weight) weight = weight.view(1, H, K).expand(T*B, H, K).contiguous() weight = weight.view(T, B*H, K).transpose(0, 1) x = x.view(T, B*H, R).transpose(0, 1) P = self.padding_l if K > T and P == K-1: weight = weight.narrow(2, K-T, T) K, P = T, T-1 # turn the convolution filters into band matrices weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False) weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight) weight_expanded = weight_expanded.narrow(2, P, T) weight_expanded = F.dropout(weight_expanded, self.weight_dropout, training=self.training) output = torch.bmm(weight_expanded, x) output = output.transpose(0, 1).contiguous().view(T, B, C) return output def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, 'input_buffer') def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer) def extra_repr(self): s = '{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}'.format( self.input_size, self.kernel_size, self.padding_l, self.num_heads, self.weight_softmax, self.bias is not None ) if self.weight_dropout > 0.: s += ', weight_dropout={}'.format(self.weight_dropout) return s
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/lightweight_convolution.py
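An illustrative sketch for LightweightConv1dTBC with made-up sizes; unlike DynamicConv, the kernel here is a single learned (num_heads, 1, kernel_size) weight shared across all timesteps.

import torch
from fairseq.modules import LightweightConv1dTBC

conv = LightweightConv1dTBC(
    input_size=16, kernel_size=3, padding_l=2,
    num_heads=4, weight_softmax=True,
)

x = torch.randn(10, 2, 16)   # T x B x C
y = conv(x)
print(y.shape)               # torch.Size([10, 2, 16])
print(conv.weight.shape)     # torch.Size([4, 1, 3])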
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import torch class ScalarBias(torch.autograd.Function): """ Adds a vector of scalars, used in self-attention mechanism to allow the model to optionally attend to this vector instead of the past """ @staticmethod def forward(ctx, input, dim, bias_init): size = list(input.size()) size[dim] += 1 output = input.new(*size).fill_(bias_init) output.narrow(dim, 1, size[dim] - 1).copy_(input) ctx.dim = dim return output @staticmethod def backward(ctx, grad): return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None def scalar_bias(input, dim, bias_init=0): return ScalarBias.apply(input, dim, bias_init)
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/scalar_bias.py
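A small sketch of the scalar_bias helper (illustrative); it prepends a constant slot along the given dimension and routes gradients only to the original entries.

import torch
from fairseq.modules.scalar_bias import scalar_bias

x = torch.randn(2, 5, requires_grad=True)
y = scalar_bias(x, dim=1, bias_init=0)
print(y.shape)          # torch.Size([2, 6]); column 0 holds the bias value
y.sum().backward()
print(x.grad.shape)     # torch.Size([2, 5]); no gradient flows to the inserted slot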
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch class LogSumExpMoE(torch.autograd.Function): """Standard LogSumExp forward pass, but use *posterior* for the backward. See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. """ @staticmethod def forward(ctx, logp, posterior, dim=-1): ctx.save_for_backward(posterior) ctx.dim = dim return torch.logsumexp(logp, dim=dim) @staticmethod def backward(ctx, grad_output): posterior, = ctx.saved_tensors grad_logp = grad_output.unsqueeze(ctx.dim) * posterior return grad_logp, None, None
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/logsumexp_moe.py
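An illustrative sketch of LogSumExpMoE: the forward pass is a plain logsumexp over experts, while the backward pass distributes the gradient according to the supplied posterior.

import torch
from fairseq.modules import LogSumExpMoE

logp = torch.randn(3, 4, requires_grad=True)          # log-likelihood under each of 4 experts
posterior = torch.softmax(torch.randn(3, 4), dim=-1)  # externally computed expert posterior

loss = -LogSumExpMoE.apply(logp, posterior, -1).sum()
loss.backward()
print(logp.grad)   # equals -posterior here, rather than the softmax of logp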
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from .learned_positional_embedding import LearnedPositionalEmbedding from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding def PositionalEmbedding( num_embeddings: int, embedding_dim: int, padding_idx: int, learned: bool = False, ): if learned: # if padding_idx is specified then offset the embedding ids by # this index and adjust num_embeddings appropriately # TODO: The right place for this offset would be inside # LearnedPositionalEmbedding. Move this there for a cleaner implementation. if padding_idx is not None: num_embeddings = num_embeddings + padding_idx + 1 m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) if padding_idx is not None: nn.init.constant_(m.weight[padding_idx], 0) else: m = SinusoidalPositionalEmbedding( embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1, ) return m
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/positional_embedding.py
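A sketch of the PositionalEmbedding factory with made-up sizes, showing that both the learned and the sinusoidal variants accept B x T token ids and return B x T x C embeddings.

import torch
from fairseq.modules import PositionalEmbedding

learned = PositionalEmbedding(num_embeddings=128, embedding_dim=32, padding_idx=1, learned=True)
sinusoidal = PositionalEmbedding(num_embeddings=128, embedding_dim=32, padding_idx=1, learned=False)

tokens = torch.randint(2, 100, (2, 10))
tokens[:, -3:] = 1                                       # padded positions
print(learned(tokens).shape, sinusoidal(tokens).shape)   # both torch.Size([2, 10, 32])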
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from .multihead_attention import MultiheadAttention class SparseMultiheadAttention(MultiheadAttention): """ Sparse Multi-Headed Attention. "Generating Long Sequences with Sparse Transformers". Implements fixed factorized self attention, where l=stride and c=expressivity. A(1) includes all words in the stride window and A(2) takes a summary of c words from the end of each stride window. If is_bidirectional=False, we do not include any words past the current word, as in the paper. """ def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, stride=32, expressivity=8, is_bidirectional=True): super().__init__( embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention ) self.is_bidirectional = is_bidirectional self.stride = stride self.expressivity = expressivity assert(self.stride > 0 and self.stride >= self.expressivity) # Used for Ai(2) calculations - beginning of [l-c, l] range def compute_checkpoint(self, word_index): if word_index % self.stride == 0 and word_index != 0: checkpoint_index = word_index - self.expressivity else: checkpoint_index = ( math.floor(word_index / self.stride) * self.stride + self.stride - self.expressivity ) return checkpoint_index # Computes Ai(2) def compute_subset_summaries(self, absolute_max): checkpoint_index = self.compute_checkpoint(0) subset_two = set() while checkpoint_index <= absolute_max-1: summary = set(range(checkpoint_index, min( checkpoint_index+self.expressivity+1, absolute_max) )) subset_two = subset_two.union(summary) checkpoint_index = self.compute_checkpoint(checkpoint_index+self.stride) return subset_two # Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf def compute_fixed_attention_subset(self, word_index, tgt_len): # +1s account for range function; [min, max) -> [min, max] if not self.is_bidirectional: absolute_max = word_index + 1 else: absolute_max = tgt_len # Subset 1 - whole window rounded_index = math.floor((word_index + self.stride) / self.stride) * self.stride if word_index % self.stride == 0 and word_index != 0: subset_one = set(range(word_index-self.stride, min(absolute_max, word_index+1))) else: subset_one = set(range(max(0, rounded_index - self.stride), min( absolute_max, rounded_index+1)) ) # Subset 2 - summary per window # If bidirectional, subset 2 is the same for every index subset_two = set() if not self.is_bidirectional: subset_two = self.compute_subset_summaries(absolute_max) return subset_one.union(subset_two) # Compute sparse mask - if bidirectional, can pre-compute and store def buffered_sparse_mask(self, tensor, tgt_len, src_len): assert(tgt_len > self.stride) sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float('-inf')) # If bidirectional, subset 2 is the same for every index subset_summaries = set() if self.is_bidirectional: subset_summaries = self.compute_subset_summaries(tgt_len) for i in range(tgt_len): fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len) fixed_attention_subset = fixed_attention_subset.union(subset_summaries) included_word_indices = torch.LongTensor(list(fixed_attention_subset)) sparse_mask[i].index_fill_(0, included_word_indices, 0) return 
sparse_mask.type_as(tensor) def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len) sparse_mask = sparse_mask.unsqueeze(0).expand(bsz * self.num_heads, tgt_len, src_len) attn_weights += sparse_mask
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/sparse_multihead_attention.py
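The SparseMultiheadAttention record above materialises the fixed factorized pattern of Child et al. (2019) as a (tgt_len x src_len) mask. Below is a minimal standalone sketch of that pattern for the unidirectional case with toy sizes (stride=4, expressivity=1); it mirrors compute_checkpoint / compute_fixed_attention_subset but does not depend on the module or on torch, and is only an illustration of the masking logic.

# Minimal sketch of the fixed factorized attention pattern (unidirectional, toy sizes).
import math

def compute_checkpoint(word_index, stride, expressivity):
    # beginning of the [l - c, l] summary range for the window containing word_index
    if word_index % stride == 0 and word_index != 0:
        return word_index - expressivity
    return math.floor(word_index / stride) * stride + stride - expressivity

def fixed_attention_subset(word_index, tgt_len, stride=4, expressivity=1):
    absolute_max = word_index + 1  # unidirectional: never attend past the current word
    rounded = math.floor((word_index + stride) / stride) * stride
    if word_index % stride == 0 and word_index != 0:
        local = set(range(word_index - stride, min(absolute_max, word_index + 1)))
    else:
        local = set(range(max(0, rounded - stride), min(absolute_max, rounded + 1)))
    summary, idx = set(), compute_checkpoint(0, stride, expressivity)
    while idx <= absolute_max - 1:
        summary |= set(range(idx, min(idx + expressivity + 1, absolute_max)))
        idx = compute_checkpoint(idx + stride, stride, expressivity)
    return local | summary

for i in range(8):
    allowed = fixed_attention_subset(i, tgt_len=8)
    print("".join("x" if j in allowed else "." for j in range(8)))

Each printed row marks which source positions a given target position may attend to; the module above encodes the same sets as 0 / -inf entries in buffered_sparse_mask.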
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.onnx.operators from fairseq import utils class SinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length. Padding symbols are ignored. """ def __init__(self, embedding_dim, padding_idx, init_size=1024): super().__init__() self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.weights = SinusoidalPositionalEmbedding.get_embedding( init_size, embedding_dim, padding_idx, ) self.onnx_trace = False self.register_buffer('_float_tensor', torch.FloatTensor(1)) def prepare_for_onnx_export_(self): self.onnx_trace = True @staticmethod def get_embedding(num_embeddings, embedding_dim, padding_idx=None): """Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb def forward(self, input, incremental_state=None, timestep=None, **kwargs): """Input is expected to be of size [bsz x seqlen].""" bsz, seq_len = torch.onnx.operators.shape_as_tensor(input) max_pos = self.padding_idx + 1 + seq_len if self.weights is None or max_pos > self.weights.size(0): # recompute/expand embeddings if needed self.weights = SinusoidalPositionalEmbedding.get_embedding( max_pos, self.embedding_dim, self.padding_idx, ) self.weights = self.weights.to(self._float_tensor) if incremental_state is not None: # positions is the same for every token when decoding a single step pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len if self.onnx_trace: return self.weights.index_select(index=self.padding_idx + pos, dim=0).unsqueeze(1).repeat(bsz, 1, 1) return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) positions = utils.make_positions(input, self.padding_idx, onnx_trace=self.onnx_trace) if self.onnx_trace: flat_embeddings = self.weights.detach().index_select(0, positions.view(-1)) embedding_shape = torch.cat((bsz.view(1), seq_len.view(1), torch.LongTensor([-1]))) embeddings = torch.onnx.operators.reshape_from_tensor_shape(flat_embeddings, embedding_shape) return embeddings return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach() def max_positions(self): """Maximum number of supported positions.""" return int(1e5) # an arbitrary large number
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/sinusoidal_positional_embedding.py
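SinusoidalPositionalEmbedding.get_embedding above precomputes a (num_embeddings x embedding_dim) table. A quick standalone re-computation with plain torch and toy sizes (no fairseq import) shows the shape it produces and that the padding row is zeroed:

# Minimal sketch: rebuild the sinusoidal table standalone and check its basic properties.
import math
import torch

def sinusoidal_table(num_embeddings, embedding_dim, padding_idx=None):
    half_dim = embedding_dim // 2
    scale = math.log(10000) / (half_dim - 1)
    freqs = torch.exp(torch.arange(half_dim, dtype=torch.float) * -scale)
    args = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * freqs.unsqueeze(0)
    emb = torch.cat([torch.sin(args), torch.cos(args)], dim=1)
    if embedding_dim % 2 == 1:
        # zero pad the odd dimension, as in get_embedding above
        emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
    if padding_idx is not None:
        emb[padding_idx, :] = 0
    return emb

table = sinusoidal_table(num_embeddings=16, embedding_dim=8, padding_idx=1)
print(table.shape)           # torch.Size([16, 8])
print(table[1].abs().sum())  # tensor(0.) -- the padding row is zeroed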
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch


def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
    if not export and torch.cuda.is_available():
        try:
            from apex.normalization import FusedLayerNorm
            return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
        except ImportError:
            pass
    return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/layer_norm.py
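Call sites use the factory above exactly like torch.nn.LayerNorm; whether the fused apex kernel is picked happens once at construction time. A minimal usage sketch, assuming fairseq is importable (on a CPU-only machine, or without apex, this simply returns torch.nn.LayerNorm):

# Minimal usage sketch of the LayerNorm factory.
import torch
from fairseq.modules.layer_norm import LayerNorm

ln = LayerNorm(16)                     # FusedLayerNorm if apex + CUDA are present, else nn.LayerNorm
x = torch.randn(2, 4, 16)
print(type(ln).__name__, ln(x).shape)  # e.g. 'LayerNorm' torch.Size([2, 4, 16])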
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. def gen_forward(): kernels = [3, 5, 7, 15, 31, 63, 127, 255] seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include "lightconv_cuda.cuh" std::vector<at::Tensor> lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) { at::DeviceGuard g(input.device()); const auto minibatch = input.size(0); const auto numFeatures = input.size(1); const auto sequenceLength = input.size(2); const auto numHeads = filters.size(0); const auto filterSize = filters.size(1); const auto numFiltersInBlock = numFeatures / numHeads; const dim3 blocks(minibatch, numFeatures); auto output = at::zeros_like(input); auto stream = at::cuda::getCurrentCUDAStream(); """ sequence_if = """ if (sequenceLength <= {seq}) {{ switch(filterSize) {{ """ case_k = """ case {k}: """ main_block = """ if (padding_l == {pad}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{ lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t> <<<blocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), filters.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, output.data<scalar_t>()); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl; } break; """ bad_filter = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl; } """ con_else = """ } else """ final_else = """ { switch(filterSize) { """ final_return = """ } return {output}; } """ with open("lightconv_cuda_forward.cu", 'w') as forward: forward.write(head) for seq in seqs: forward.write(sequence_if.format(seq=seq)) for k in kernels: forward.write(case_k.format(k=k)) for pad in [k // 2, k - 1]: forward.write(main_block.format(k=k, b_size=seq, pad=pad)) forward.write(bad_padding) forward.write(bad_filter) forward.write(con_else) forward.write(final_else) for k in kernels: forward.write(case_k.format(k=k)) for pad in [k // 2, k - 1]: forward.write(main_block.format(k=k, b_size=seq, pad=pad)) forward.write(bad_padding) forward.write(bad_filter) forward.write(final_return) def gen_backward(): head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. 
*/ #include "lightconv_cuda.cuh" std::vector<at::Tensor> lightconv_cuda_backward( at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor filters) { // gradWrtInput const int minibatch = input.size(0); const int numFeatures = input.size(1); const int sequenceLength = input.size(2); const int numHeads = filters.size(0); const int filterSize = filters.size(1); const dim3 gradBlocks(minibatch, numFeatures); const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads); const dim3 weightGradSecondpassBlocks(numHeads, filterSize); const int numFiltersInBlock = numFeatures / numHeads; auto gradInput = at::zeros_like(input); auto gradFilters = at::zeros_like(filters); at::DeviceGuard g(input.device()); auto stream = at::cuda::getCurrentCUDAStream(); switch(filterSize) { """ sequence_if = """ if (sequenceLength <= {seq}) {{ """ case_k = """ case {k}: """ main_block = """ if (padding_l == {p}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{ lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t> <<<gradBlocks, {b_size}, 0, stream>>>( gradOutput.data<scalar_t>(), filters.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, gradInput.data<scalar_t>()); """ weight_grad_short = """ at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat)); lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t> <<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), gradOutput.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, numHeads, tempSumGradFilters.data<float>() ); lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t> <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>( tempSumGradFilters.data<float>(), minibatch, numFiltersInBlock, gradFilters.data<scalar_t>() ); }})); }} else """ weight_grad = """ at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat)); lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t> <<<gradBlocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), gradOutput.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, tempSumGradFilters.data<float>() ); lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t> <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>( tempSumGradFilters.data<float>(), minibatch, numFiltersInBlock, gradFilters.data<scalar_t>() ); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl; } """ breakout = """ break; """ bad_filter = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl; """ con_else = """ } else """ final_else = """ { switch(filterSize) { """ last_return = """ } return {gradInput, gradFilters}; } """ kernels = [3, 5, 7, 15, 31, 63, 127, 255] seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] thresh = [32, 32, 64, 128, 256, -1, -1, -1] max_mem = [-1, -1, -1, -1, -1, 192, 96, 64] with open("lightconv_cuda_backward.cu", 'w') as backward: backward.write(head) for (k, t, mem) in zip(kernels, thresh, max_mem): backward.write(case_k.format(k=k)) for seq in seqs: if (t == -1 or seq <= t) and (mem == -1 or seq < mem): backward.write(sequence_if.format(seq=seq)) for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=seq, p=p)) 
backward.write(weight_grad_short.format(k=k, b_size=seq, p=p)) backward.write(bad_padding) else: for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=32, p=p)) backward.write(weight_grad.format(k=k, b_size=32, p=p)) backward.write(bad_padding) backward.write(breakout) break backward.write(con_else) backward.write(bad_filter) backward.write(last_return) if __name__ == "__main__": gen_forward() gen_backward()
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/lightconv_layer/cuda_function_gen.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .lightconv_layer import LightconvLayer # noqa
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/lightconv_layer/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from torch.autograd import Function import torch.nn.functional as F import lightconv_cuda from fairseq import utils class lightconvFunction(Function): @staticmethod def forward(ctx, x, weights, padding_l): ctx.padding_l = padding_l outputs = lightconv_cuda.forward(x, weights, padding_l) variables = [x, weights] ctx.save_for_backward(*variables) return outputs[0] @staticmethod def backward(ctx, grad_output): outputs = lightconv_cuda.backward( grad_output.contiguous(), ctx.padding_l, *ctx.saved_variables) grad_input, grad_weights = outputs return grad_input, grad_weights, None class LightconvLayer(nn.Module): def __init__( self, input_size, kernel_size=1, padding_l=None, weight_softmax=False, num_heads=1, weight_dropout=0., bias=False): super(LightconvLayer, self).__init__() self.input_size = input_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_softmax = weight_softmax self.weight_dropout = weight_dropout self.weight = nn.Parameter(torch.Tensor(num_heads, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.) def forward(self, x, incremental_state=None): # during inference time, incremental BMM is faster if incremental_state is not None: T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:]) x_unfold = x_unfold.view(T*B*H, R, -1) weight = self.weight if self.weight_softmax: weight = F.softmax(weight.float(), dim=1).type_as(weight) weight = weight[:, -x_unfold.size(2):] K = weight.size(1) weight = weight.view(1, H, K).expand(T*B, H, K).contiguous().view(T*B*H, K, 1) weight = F.dropout(weight, self.weight_dropout, training=self.training) output = torch.bmm(x_unfold, weight) # T*B*H x R x 1 output = output.view(T, B, C) return output # during training time, use CUDA kernel else: x = x.permute(1, 2, 0).contiguous() weight = self.weight if self.weight_softmax: weight = F.softmax(self.weight, -1) if self.weight_dropout: weight = F.dropout(weight, self.weight_dropout, training=self.training) return lightconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1) def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, 'input_buffer') def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer) def half(self): print("HALF") return self._apply(lambda t: t.half() if t.is_floating_point() else t)
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/lightconv_layer/lightconv_layer.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from setuptools import setup from torch.utils.cpp_extension import CUDAExtension, BuildExtension setup( name='lightconv_layer', ext_modules=[ CUDAExtension('lightconv_cuda', [ 'lightconv_cuda.cpp', 'lightconv_cuda_kernel.cu', ]), ], cmdclass={ 'build_ext': BuildExtension })
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/lightconv_layer/setup.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. def gen_forward(): kernels = [3, 5, 7, 15, 31, 63, 127, 255] blocks = [32, 64, 128, 256] head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include "dynamicconv_cuda.cuh" std::vector<at::Tensor> dynamicconv_cuda_forward(at::Tensor input, at::Tensor weight, int padding_l) { at::DeviceGuard g(input.device()); const auto minibatch = input.size(0); const auto numFeatures = input.size(1); const auto sequenceLength = input.size(2); const auto numHeads = weight.size(1); const auto filterSize = weight.size(2); const auto numFiltersInBlock = numFeatures / numHeads; const dim3 blocks(minibatch, numFeatures); auto output = at::zeros_like(input); auto stream = at::cuda::getCurrentCUDAStream(); """ switch = """ switch(filterSize) { """ case_k = """ case {k}: """ main_block = """ if (padding_l == {pad}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "dynamicconv_forward", ([&] {{ dynamicconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t> <<<blocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), weight.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, numHeads, output.data<scalar_t>()); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl; } break;\n """ end = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl; } return {output}; } """ with open("dynamicconv_cuda_forward.cu", 'w') as forward: forward.write(head) forward.write(switch) for k in kernels: b_size = 32 for b in blocks: if b > k: b_size = b break forward.write(case_k.format(k=k)) for pad in [k // 2, k - 1]: forward.write(main_block.format(k=k, b_size=b_size, pad=pad)) forward.write(bad_padding) forward.write(end) def gen_backward(): kernels = [3, 5, 7, 15, 31, 63, 127, 255] thresh = [512, 512, 512, 512, 512, 380, 256, 256] min_block = [64, 64, 64, 64, 64, 64, 128, 256] seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. 
*/ #include "dynamicconv_cuda.cuh" std::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) { at::DeviceGuard g(input.device()); const auto minibatch = input.size(0); const auto numFeatures = input.size(1); const auto sequenceLength = input.size(2); const auto numHeads = weight.size(1); const auto filterSize = weight.size(2); const auto numFiltersInBlock = numFeatures / numHeads; auto numChunks = 1; auto gradInput = at::zeros_like(input); auto gradWeight = at::zeros_like(weight); auto stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(minibatch, numHeads, numChunks); """ sequence_if = """ if (sequenceLength < {seq}) {{ switch(filterSize) {{ """ case_k = """ case {k}: """ chunks_reset = """ numChunks = int(ceilf(sequenceLength/float({b_size}))); blocks = dim3(minibatch, numHeads, numChunks); """ main_block = """ if (padding_l == {p}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {{ dynamicconv_backward_kernel<{k}, {b_size}, {p}, scalar_t> <<<blocks, {b_size}, 0, stream>>>( gradOutput.data<scalar_t>(), input.data<scalar_t>(), weight.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, numHeads, gradWeight.data<scalar_t>(), gradInput.data<scalar_t>()); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl; } break;\n """ bad_filter = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl; } """ con_else = """ } else """ final_else = """ { switch(filterSize) { """ last_return = """ } return {gradInput, gradWeight}; } """ with open("dynamicconv_cuda_backward.cu", 'w') as backward: backward.write(head) for seq in seqs: backward.write(sequence_if.format(seq=seq)) for k, t, m in zip(kernels, thresh, min_block): backward.write(case_k.format(k=k)) if seq <= t: b_size = seq else: b_size = m backward.write(chunks_reset.format(b_size=b_size)) for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=b_size, p=p)) backward.write(bad_padding) backward.write(bad_filter) backward.write(con_else) backward.write(final_else) for k, m in zip(kernels, min_block): backward.write(case_k.format(k=k)) backward.write(chunks_reset.format(b_size=m)) for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=m, p=p)) backward.write(bad_padding) backward.write(bad_filter) backward.write(last_return) if __name__ == "__main__": gen_forward() gen_backward()
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/dynamicconv_layer/cuda_function_gen.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .dynamicconv_layer import DynamicconvLayer # noqa
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/dynamicconv_layer/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from torch import nn
from torch.autograd import Function
import torch.nn.functional as F

import dynamicconv_cuda
from fairseq import utils
from fairseq.modules.unfold import unfold1d


class dynamicconvFunction(Function):

    @staticmethod
    def forward(ctx, x, weights, padding_l):
        ctx.padding_l = padding_l
        outputs = dynamicconv_cuda.forward(x, weights, padding_l)
        variables = [x, weights]
        ctx.save_for_backward(*variables)
        return outputs[0]

    @staticmethod
    def backward(ctx, grad_output):
        outputs = dynamicconv_cuda.backward(
            grad_output.contiguous(),
            ctx.padding_l,
            *ctx.saved_variables)
        grad_input, grad_weights = outputs
        return grad_input, grad_weights, None


class DynamicconvLayer(nn.Module):
    def __init__(
            self,
            input_size,
            kernel_size=1,
            padding_l=None,
            weight_softmax=False,
            num_heads=1,
            weight_dropout=0.,
            bias=False,
            renorm_padding=False,
            conv_bias=False,
            query_size=None):

        super(DynamicconvLayer, self).__init__()
        self.input_size = input_size
        self.query_size = input_size if query_size is None else query_size
        self.kernel_size = kernel_size
        self.padding_l = padding_l
        self.num_heads = num_heads
        self.weight_softmax = weight_softmax
        self.weight_dropout = weight_dropout
        self.renorm_padding = renorm_padding
        self.bias = bias

        self.weight_linear = nn.Linear(input_size, num_heads * kernel_size, bias)
        if conv_bias:
            self.conv_bias = nn.Parameter(torch.Tensor(input_size))
        else:
            self.conv_bias = None
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.weight_linear.weight)
        if self.conv_bias is not None:
            nn.init.constant_(self.conv_bias, 0.)
            nn.init.constant_(self.weight_linear.bias, 0.)
def forward(self, x, incremental_state=None, query=None, unfold=None): T, B, C = x.size() K, H = self.kernel_size, self.num_heads # R = C // H # during inference time, incremental BMM is faster if incremental_state is not None: unfold = x.size(0) > 512 if unfold is None else unfold # use unfold mode as default for long sequence to save memory unfold = unfold or (incremental_state is not None) assert query is None if query is None: query = x if unfold: output = self._forward_unfolded(x, incremental_state, query) else: output = self._forward_expanded(x, incremental_state, query) if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output # during training time, use CUDA kernel else: weight = self.weight_linear(x).view(T, B, H, K) if self.weight_softmax: weight = F.softmax(weight, dim=-1) if self.weight_dropout: weight = F.dropout(weight, self.weight_dropout, training=self.training) weight = weight.permute(1, 2, 3, 0).contiguous() self.filters = weight x = x.permute(1, 2, 0).contiguous() output = dynamicconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1) if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, 'input_buffer') def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer) def _forward_unfolded(self, x, incremental_state, query): '''The conventional implementation of convolutions. Unfolding the input by having a window shifting to the right.''' T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight_linear(query).view(T*B*H, -1) # renorm_padding is only implemented in _forward_expanded assert not self.renorm_padding or incremental_state is not None if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:]) x_unfold = x_unfold.view(T*B*H, R, -1) else: padding_l = self.padding_l if K > T and padding_l == K-1: weight = weight.narrow(1, K-T, T) K, padding_l = T, T-1 # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, K, padding_l, 0) x_unfold = x_unfold.view(T*B*H, R, K) if self.weight_softmax and not self.renorm_padding: weight = F.softmax(weight, dim=1) weight = weight.narrow(1, 0, K) if incremental_state is not None: weight = weight[:, -x_unfold.size(2):] K = weight.size(1) if self.weight_softmax and self.renorm_padding: weight = F.softmax(weight, dim=1) weight = F.dropout(weight, self.weight_dropout, training=self.training, inplace=False) output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1 output = output.view(T, B, C) return output def _forward_expanded(self, x, incremental_stat, query): '''Turn the convolution filters into band matrices and do matrix multiplication. This is faster when the sequence is short, but less memory efficient. This is not used in the decoder during inference. 
        '''
        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size
        weight = self.weight_linear(query).view(T*B*H, -1)

        if not self.renorm_padding:
            if self.weight_softmax:
                weight = F.softmax(weight, dim=1)
            weight = F.dropout(weight, self.weight_dropout, training=self.training, inplace=False)
        weight = weight.narrow(1, 0, K).contiguous()
        weight = weight.view(T, B*H, K).transpose(0, 1)

        x = x.view(T, B*H, R).transpose(0, 1)
        if self.weight_softmax and self.renorm_padding:
            # turn the convolution filters into band matrices
            weight_expanded = weight.new(B*H, T, T+K-1).fill_(float('-inf'))
            weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
            weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
            # normalize the weight over valid positions like self-attention
            weight_expanded = F.softmax(weight_expanded, dim=2)
            weight_expanded = F.dropout(weight_expanded, self.weight_dropout, training=self.training, inplace=False)
        else:
            P = self.padding_l
            # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
            if K > T and P == K-1:
                weight = weight.narrow(2, K-T, T)
                K, P = T, T-1
            # turn the convolution filters into band matrices
            weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False)
            weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
            weight_expanded = weight_expanded.narrow(2, P, T)  # B*H x T x T
        output = torch.bmm(weight_expanded, x)
        output = output.transpose(0, 1).contiguous().view(T, B, C)
        return output
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py
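The _forward_expanded path above relies on an as_strided copy that lays per-position filters of shape (B*H, T, K) into a (B*H, T, T+K-1) band matrix, so the depthwise convolution collapses into a single bmm. A minimal sketch with toy, made-up sizes (no CUDA kernel, no renorm_padding) checking that the band-matrix product matches an explicit sum over kernel taps:

# Minimal sketch of the band-matrix trick used by _forward_expanded (toy sizes).
import torch

B_H, T, K, R = 2, 5, 3, 4        # batch*heads, time, kernel size, per-head dim
P = K - 1                        # padding_l for a causal filter
x = torch.randn(B_H, T, R)       # input already reshaped to (B*H, T, R)
weight = torch.softmax(torch.randn(B_H, T, K), dim=-1)

# lay the filters into a band matrix and contract with one bmm
band = weight.new_zeros(B_H, T, T + K - 1)
band.as_strided((B_H, T, K), (T * (T + K - 1), T + K, 1)).copy_(weight)
band = band.narrow(2, P, T)      # (B*H, T, T)
out_bmm = torch.bmm(band, x)     # (B*H, T, R)

# reference: explicit sum over kernel taps
out_ref = torch.zeros_like(out_bmm)
for t in range(T):
    for k in range(K):
        s = t + k - P            # source position read by tap k at output position t
        if 0 <= s < T:
            out_ref[:, t] += weight[:, t, k:k+1] * x[:, s]

print(torch.allclose(out_bmm, out_ref, atol=1e-6))   # True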
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from setuptools import setup from torch.utils.cpp_extension import CUDAExtension, BuildExtension setup( name='dynamicconv_layer', ext_modules=[ CUDAExtension( name='dynamicconv_cuda', sources=[ 'dynamicconv_cuda.cpp', 'dynamicconv_cuda_kernel.cu', ], ), ], cmdclass={ 'build_ext': BuildExtension })
data2vec_vision-main
infoxlm/fairseq/fairseq/modules/dynamicconv_layer/setup.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import BaseWrapperDataset


class OffsetTokensDataset(BaseWrapperDataset):

    def __init__(self, dataset, offset):
        super().__init__(dataset)
        self.offset = offset

    def __getitem__(self, idx):
        return self.dataset[idx] + self.offset
data2vec_vision-main
infoxlm/fairseq/fairseq/data/offset_tokens_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict import torch from torch.utils.data.dataloader import default_collate from . import FairseqDataset def _flatten(dico, prefix=None): """Flatten a nested dictionary.""" new_dico = OrderedDict() if isinstance(dico, dict): prefix = prefix + '.' if prefix is not None else '' for k, v in dico.items(): if v is None: continue new_dico.update(_flatten(v, prefix + k)) elif isinstance(dico, list): for i, v in enumerate(dico): new_dico.update(_flatten(v, prefix + '.[' + str(i) + ']')) else: new_dico = OrderedDict({prefix: dico}) return new_dico def _unflatten(dico): """Unflatten a flattened dictionary into a nested dictionary.""" new_dico = OrderedDict() for full_k, v in dico.items(): full_k = full_k.split('.') node = new_dico for k in full_k[:-1]: if k.startswith('[') and k.endswith(']'): k = int(k[1:-1]) if k not in node: node[k] = OrderedDict() node = node[k] node[full_k[-1]] = v return new_dico class NestedDictionaryDataset(FairseqDataset): def __init__(self, defn, sizes=None): super().__init__() self.defn = _flatten(defn) self.sizes = [sizes] if not isinstance(sizes, (list, tuple)) else sizes first = None for v in self.defn.values(): if not isinstance(v, (FairseqDataset, torch.utils.data.Dataset, )): raise ValueError('Expected Dataset but found: {}'.format(v.__class__)) first = first or v if len(v) > 0: assert len(v) == len(first), 'dataset lengths must match' self._len = len(first) def __getitem__(self, index): return OrderedDict((k, ds[index]) for k, ds in self.defn.items()) def __len__(self): return self._len def collater(self, samples): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch suitable for forwarding with a Model """ if len(samples) == 0: return {} sample = OrderedDict() for k, ds in self.defn.items(): try: sample[k] = ds.collater([s[k] for s in samples]) except NotImplementedError: sample[k] = default_collate([s[k] for s in samples]) return _unflatten(sample) def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" return max(s[index] for s in self.sizes) def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" if len(self.sizes) == 1: return self.sizes[0][index] else: return (s[index] for s in self.sizes) @property def supports_prefetch(self): """Whether this dataset supports prefetching.""" return any(ds.supports_prefetch for ds in self.defn.values()) def prefetch(self, indices): """Prefetch the data required for this epoch.""" for ds in self.defn.values(): if getattr(ds, 'supports_prefetch', False): ds.prefetch(indices) def set_epoch(self, epoch): super().set_epoch(epoch) for ds in self.defn.values(): ds.set_epoch(epoch)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/nested_dictionary_dataset.py
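NestedDictionaryDataset keys its flattened definition with dotted paths and nests the collated batch back with _unflatten. A minimal sketch of that key scheme, simplified by omitting the list-handling branch and using plain strings to stand in for datasets:

# Minimal sketch of the dotted-key flattening used by NestedDictionaryDataset.
from collections import OrderedDict

def flatten(dico, prefix=None):
    new = OrderedDict()
    if isinstance(dico, dict):
        prefix = prefix + '.' if prefix is not None else ''
        for k, v in dico.items():
            if v is None:
                continue
            new.update(flatten(v, prefix + k))
    else:
        new = OrderedDict({prefix: dico})
    return new

defn = {'net_input': {'src_tokens': 'ds_tokens', 'src_lengths': 'ds_lengths'},
        'target': 'ds_target'}
print(list(flatten(defn).keys()))
# ['net_input.src_tokens', 'net_input.src_lengths', 'target']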
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np from . import BaseWrapperDataset class TruncateDataset(BaseWrapperDataset): def __init__(self, dataset, truncation_length): super().__init__(dataset) assert truncation_length is not None self.truncation_length = truncation_length self.dataset = dataset def __getitem__(self, index): item = self.dataset[index] item_len = item.size(0) if item_len > self.truncation_length: item = item[:self.truncation_length] return item @property def sizes(self): return np.minimum(self.dataset.sizes, self.truncation_length) def __len__(self): return len(self.dataset)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/truncate_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import bisect import numpy as np from torch.utils.data.dataloader import default_collate from . import FairseqDataset class ConcatDataset(FairseqDataset): @staticmethod def cumsum(sequence, sample_ratios): r, s = [], 0 for e, ratio in zip(sequence, sample_ratios): curr_len = int(ratio * len(e)) r.append(curr_len + s) s += curr_len return r def __init__(self, datasets, sample_ratios=1): super(ConcatDataset, self).__init__() assert len(datasets) > 0, "datasets should not be an empty iterable" self.datasets = list(datasets) if isinstance(sample_ratios, int): sample_ratios = [sample_ratios] * len(self.datasets) self.sample_ratios = sample_ratios self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios) self.real_sizes = [len(d) for d in self.datasets] def __len__(self): return self.cumulative_sizes[-1] def __getitem__(self, idx): dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) return self.datasets[dataset_idx][sample_idx] def _get_dataset_and_sample_index(self, idx: int): dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) if dataset_idx == 0: sample_idx = idx else: sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] sample_idx = sample_idx % self.real_sizes[dataset_idx] return dataset_idx, sample_idx def collater(self, samples): # For now only supports datasets with same underlying collater implementations if hasattr(self.datasets[0], 'collater'): return self.datasets[0].collater(samples) else: return default_collate(samples) def size(self, idx: int): """ Return an example's size as a float or tuple. """ dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) return self.datasets[dataset_idx].size(sample_idx) def num_tokens(self, index: int): return np.max(self.size(index)) def attr(self, attr: str, index: int): dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) return getattr(self.datasets[dataset_idx], attr, None) @property def sizes(self): _dataset_sizes = [] for ds, sr in zip(self.datasets, self.sample_ratios): if isinstance(ds.sizes, np.ndarray): _dataset_sizes.append(np.tile(ds.sizes, sr)) else: # Only support underlying dataset with single size array. assert isinstance(ds.sizes, list) _dataset_sizes.append(np.tile(ds.sizes[0], sr)) return np.concatenate(_dataset_sizes) @property def supports_prefetch(self): return all(d.supports_prefetch for d in self.datasets) def ordered_indices(self): """ Returns indices sorted by length. So less padding is needed. """ return np.argsort(self.sizes) def prefetch(self, indices): frm = 0 for to, ds in zip(self.cumulative_sizes, self.datasets): real_size = len(ds) if getattr(ds, 'supports_prefetch', False): ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to]) frm = to def set_epoch(self, epoch): super().set_epoch(epoch) for ds in self.datasets: if hasattr(ds, 'set_epoch'): ds.set_epoch(epoch)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/concat_dataset.py
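ConcatDataset up-samples a corpus by multiplying its length into the cumulative sizes and wrapping indices modulo the real dataset size. A small standalone sketch of that index arithmetic with toy lengths:

# Minimal sketch of ConcatDataset's global-index -> (dataset, sample) mapping.
import bisect

lengths = [3, 5]            # len() of each underlying dataset
sample_ratios = [2, 1]      # the first dataset is repeated twice

cumulative, s = [], 0
for n, r in zip(lengths, sample_ratios):
    s += int(r * n)
    cumulative.append(s)    # [6, 11] -> total length 11

def map_index(idx):
    d = bisect.bisect_right(cumulative, idx)
    local = idx if d == 0 else idx - cumulative[d - 1]
    return d, local % lengths[d]    # wrap around for up-sampled datasets

print([map_index(i) for i in range(cumulative[-1])])
# indices 0..5 map into dataset 0 (cycling through its 3 samples), 6..10 into dataset 1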
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import BaseWrapperDataset class ReplaceDataset(BaseWrapperDataset): """Replaces tokens found in the dataset by a specified replacement token Args: dataset (~torch.utils.data.Dataset): dataset to replace tokens in replace_map(Dictionary[int,int]): map of token to replace -> replacement token offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset. should be as many as the number of objects returned by the underlying dataset __getitem__ method. """ def __init__(self, dataset, replace_map, offsets): super().__init__(dataset) assert len(replace_map) > 0 self.replace_map = replace_map self.offsets = offsets def __getitem__(self, index): item = self.dataset[index] is_tuple = isinstance(item, tuple) srcs = item if is_tuple else [item] for offset, src in zip(self.offsets, srcs): for k, v in self.replace_map.items(): src_off = src[offset:] if offset >= 0 else src[:offset] src_off.masked_fill_(src_off == k, v) item = srcs if is_tuple else srcs[0] return item
data2vec_vision-main
infoxlm/fairseq/fairseq/data/replace_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from . import FairseqDataset def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True): """Backtranslate a list of samples. Given an input (*samples*) of the form: [{'id': 1, 'source': 'hallo welt'}] this will return: [{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}] Args: samples (List[dict]): samples to backtranslate. Individual samples are expected to have a 'source' key, which will become the 'target' after backtranslation. collate_fn (callable): function to collate samples into a mini-batch generate_fn (callable): function to generate backtranslations cuda (bool): use GPU for generation (default: ``True``) Returns: List[dict]: an updated list of samples with a backtranslated source """ collated_samples = collate_fn(samples) s = utils.move_to_cuda(collated_samples) if cuda else collated_samples generated_sources = generate_fn(s) id_to_src = { sample['id']: sample['source'] for sample in samples } # Go through each tgt sentence in batch and its corresponding best # generated hypothesis and create a backtranslation data pair # {id: id, source: generated backtranslation, target: original tgt} return [ {'id': id.item(), 'target': id_to_src[id.item()], 'source': hypos[0]['tokens'].cpu()} for id, hypos in zip(collated_samples['id'], generated_sources) ] class BacktranslationDataset(FairseqDataset): """ Sets up a backtranslation dataset which takes a tgt batch, generates a src using a tgt-src backtranslation function (*backtranslation_fn*), and returns the corresponding `{generated src, input tgt}` batch. Args: tgt_dataset (~fairseq.data.FairseqDataset): the dataset to be backtranslated. Only the source side of this dataset will be used. After backtranslation, the source sentences in this dataset will be returned as the targets. src_dict (~fairseq.data.Dictionary): the dictionary of backtranslated sentences. tgt_dict (~fairseq.data.Dictionary, optional): the dictionary of sentences to be backtranslated. backtranslation_fn (callable, optional): function to call to generate backtranslations. This is typically the `generate` method of a :class:`~fairseq.sequence_generator.SequenceGenerator` object. Pass in None when it is not available at initialization time, and use set_backtranslation_fn function to set it when available. output_collater (callable, optional): function to call on the backtranslated samples to create the final batch (default: ``tgt_dataset.collater``). cuda: use GPU for generation """ def __init__( self, tgt_dataset, src_dict, tgt_dict=None, backtranslation_fn=None, output_collater=None, cuda=True, **kwargs ): self.tgt_dataset = tgt_dataset self.backtranslation_fn = backtranslation_fn self.output_collater = output_collater if output_collater is not None \ else tgt_dataset.collater self.cuda = cuda if torch.cuda.is_available() else False self.src_dict = src_dict self.tgt_dict = tgt_dict def __getitem__(self, index): """ Returns a single sample from *tgt_dataset*. Note that backtranslation is not applied in this step; use :func:`collater` instead to backtranslate a batch of samples. 
""" return self.tgt_dataset[index] def __len__(self): return len(self.tgt_dataset) def set_backtranslation_fn(self, backtranslation_fn): self.backtranslation_fn = backtranslation_fn def collater(self, samples): """Merge and backtranslate a list of samples to form a mini-batch. Using the samples from *tgt_dataset*, load a collated target sample to feed to the backtranslation model. Then take the backtranslation with the best score as the source and the original input as the target. Note: we expect *tgt_dataset* to provide a function `collater()` that will collate samples into the format expected by *backtranslation_fn*. After backtranslation, we will feed the new list of samples (i.e., the `(backtranslated source, original source)` pairs) to *output_collater* and return the result. Args: samples (List[dict]): samples to backtranslate and collate Returns: dict: a mini-batch with keys coming from *output_collater* """ if samples[0].get('is_dummy', False): return samples samples = backtranslate_samples( samples=samples, collate_fn=self.tgt_dataset.collater, generate_fn=( lambda net_input: self.backtranslation_fn(net_input) ), cuda=self.cuda, ) return self.output_collater(samples) def num_tokens(self, index): """Just use the tgt dataset num_tokens""" return self.tgt_dataset.num_tokens(index) def ordered_indices(self): """Just use the tgt dataset ordered_indices""" return self.tgt_dataset.ordered_indices() def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``. Note: we use *tgt_dataset* to approximate the length of the source sentence, since we do not know the actual length until after backtranslation. """ tgt_size = self.tgt_dataset.size(index)[0] return (tgt_size, tgt_size) @property def supports_prefetch(self): return getattr(self.tgt_dataset, 'supports_prefetch', False) def prefetch(self, indices): return self.tgt_dataset.prefetch(indices)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/backtranslation_dataset.py
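backtranslate_samples only needs a collate function that exposes an 'id' tensor and a generate function that returns one hypothesis list per input. A minimal CPU-only sketch with dummy stand-ins (the real generate_fn is a SequenceGenerator generate method; the toy functions below are made up for illustration), assuming fairseq is importable:

# Minimal sketch of the contract backtranslate_samples expects.
import torch
from fairseq.data.backtranslation_dataset import backtranslate_samples

samples = [{'id': 0, 'source': torch.LongTensor([7, 8, 2])},
           {'id': 1, 'source': torch.LongTensor([5, 2])}]

def toy_collate(batch):
    # only the keys backtranslate_samples actually touches
    return {'id': torch.LongTensor([s['id'] for s in batch]),
            'net_input': {'src_tokens': [s['source'] for s in batch]}}

def toy_generate(collated):
    # one best hypothesis per input; here just the reversed source tokens
    return [[{'tokens': toks.flip(0)}] for toks in collated['net_input']['src_tokens']]

pairs = backtranslate_samples(samples, toy_collate, toy_generate, cuda=False)
print(pairs[0]['source'], pairs[0]['target'])   # generated source, original tokens as target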
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import FairseqDataset class IdDataset(FairseqDataset): def __getitem__(self, index): return index def __len__(self): return 0 def collater(self, samples): return torch.tensor(samples)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/id_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from . import BaseWrapperDataset class PrependDataset(BaseWrapperDataset): def __init__(self, dataset, prepend_getter, ensure_first_token_is=None): super().__init__(dataset) self.prepend_getter = prepend_getter self.ensure_first_token = ensure_first_token_is def __getitem__(self, idx): item = self.dataset[idx] is_tuple = isinstance(item, tuple) src = item[0] if is_tuple else item assert self.ensure_first_token is None or src[0] == self.ensure_first_token prepend_idx = self.prepend_getter(self.dataset, idx) assert isinstance(prepend_idx, int) src[0] = prepend_idx item = tuple((src,) + item[1:]) if is_tuple else src return item
data2vec_vision-main
infoxlm/fairseq/fairseq/data/prepend_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict from typing import Callable, Dict, List import numpy as np from . import FairseqDataset def uniform_sampler(x): # Sample from uniform distribution return np.random.choice(x, 1).item() class MultiCorpusSampledDataset(FairseqDataset): """ Stores multiple instances of FairseqDataset together and in every iteration creates a batch by first sampling a dataset according to a specified probability distribution and then getting instances from that dataset. Args: datasets: an OrderedDict of FairseqDataset instances. sampling_func: A function for sampling over list of dataset keys. Default strategy is to sample uniformly. """ def __init__( self, datasets: Dict[str, FairseqDataset], sampling_func: Callable[[List], int] = None, ): super().__init__() assert isinstance(datasets, OrderedDict) self.datasets = datasets if sampling_func is None: sampling_func = uniform_sampler self.sampling_func = sampling_func self.total_num_instances = 0 for _, dataset in datasets.items(): assert isinstance(dataset, FairseqDataset) self.total_num_instances += dataset.__len__() self._ordered_indices = None def __len__(self): """ Length of this dataset is the sum of individual datasets """ return self.total_num_instances def ordered_indices(self): """ Ordered indices for batching. Here we call the underlying dataset's ordered_indices() so that we get the same random ordering as we would have from using the underlying dataset directly. """ if self._ordered_indices is None: self._ordered_indices = OrderedDict( [ (key, dataset.ordered_indices()) for key, dataset in self.datasets.items() ] ) return np.arange(len(self)) def _map_index_to_dataset(self, key: int, index: int): """ Different underlying datasets have different lengths. In order to ensure we are not accessing an index outside the range of the current dataset size, we wrap around. This function should be called after we have created an ordering for this and all underlying datasets. """ assert ( self._ordered_indices is not None ), "Must call MultiCorpusSampledDataset.ordered_indices() first" mapped_index = index % len(self.datasets[key]) return self._ordered_indices[key][mapped_index] def __getitem__(self, index: int): """ Get the item associated with index from each underlying dataset. Since index is in the range of [0, TotalNumInstances], we need to map the index to the dataset before retrieving the item. """ return OrderedDict( [ (key, dataset[self._map_index_to_dataset(key, index)]) for key, dataset in self.datasets.items() ] ) def collater(self, samples: List[Dict]): """ Generate a mini-batch for this dataset. To convert this into a regular mini-batch we use the following logic: 1. Select a dataset using the specified probability distribution. 2. Call the collater function of the selected dataset. """ if len(samples) == 0: return None selected_key = self.sampling_func(list(self.datasets.keys())) selected_samples = [sample[selected_key] for sample in samples] return self.datasets[selected_key].collater(selected_samples) def num_tokens(self, index: int): """ Return an example's length (number of tokens), used for batching. Here we return the max across all examples at index across all underlying datasets. 
""" return max( dataset.num_tokens(self._map_index_to_dataset(key, index)) for key, dataset in self.datasets.items() ) def size(self, index: int): """ Return an example's size as a float or tuple. Here we return the max across all underlying datasets. This value is used when filtering a dataset with max-positions. """ return max( dataset.size(self._map_index_to_dataset(key, index)) for key, dataset in self.datasets.items() ) @property def supports_prefetch(self): return all( getattr(dataset, "supports_prefetch", False) for dataset in self.datasets.values() ) def prefetch(self, indices): for key, dataset in self.datasets.items(): dataset.prefetch( [self._map_index_to_dataset(key, index) for index in indices] )
data2vec_vision-main
infoxlm/fairseq/fairseq/data/multi_corpus_sampled_dataset.py
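The default sampler above draws corpora uniformly; any callable that takes the list of dataset keys can be passed as sampling_func. A small sketch of a weighted alternative (the corpus keys and weights here are made up for illustration):

# Minimal sketch of a weighted sampling_func for MultiCorpusSampledDataset.
import numpy as np

def make_weighted_sampler(weights):
    # weights: dict mapping dataset key -> unnormalised sampling weight
    def sampler(keys):
        p = np.array([weights[k] for k in keys], dtype=float)
        p /= p.sum()
        return np.random.choice(keys, 1, p=p).item()
    return sampler

sampler = make_weighted_sampler({'en-de': 3.0, 'en-fr': 1.0})
print(sampler(['en-de', 'en-fr']))   # 'en-de' roughly 75% of the time

An instance of this would be supplied as sampling_func when constructing MultiCorpusSampledDataset, in place of the uniform default.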
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import FairseqDataset class NumSamplesDataset(FairseqDataset): def __getitem__(self, index): return 1 def __len__(self): return 0 def collater(self, samples): return sum(samples)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/num_samples_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import numpy as np from fairseq.data import data_utils class WordNoising(object): """Generate a noisy version of a sentence, without changing words themselves.""" def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None): self.dictionary = dictionary self.bpe_end = None if bpe_cont_marker: self.bpe_end = np.array([ not self.dictionary[i].endswith(bpe_cont_marker) for i in range(len(self.dictionary)) ]) elif bpe_end_marker: self.bpe_end = np.array([ self.dictionary[i].endswith(bpe_end_marker) for i in range(len(self.dictionary)) ]) self.get_word_idx = ( self._get_bpe_word_idx if self.bpe_end is not None else self._get_token_idx ) def noising(self, x, lengths, noising_prob=0.0): raise NotImplementedError() def _get_bpe_word_idx(self, x): """ Given a list of BPE tokens, for every index in the tokens list, return the index of the word grouping that it belongs to. For example, for input x corresponding to ["how", "are", "y@@", "ou"], return [[0], [1], [2], [2]]. """ # x: (T x B) bpe_end = self.bpe_end[x] if (x.size(0) == 1 and x.size(1) == 1): # Special case when we only have one word in x. If x = [[N]], # bpe_end is a scalar (bool) instead of a 2-dim array of bools, # which makes the sum operation below fail. return np.array([[0]]) # do a reduce front sum to generate word ids word_idx = bpe_end[::-1].cumsum(0)[::-1] word_idx = word_idx.max(0)[None, :] - word_idx return word_idx def _get_token_idx(self, x): """ This is to extend noising functions to be able to apply to non-bpe tokens, e.g. word or characters. """ x = torch.t(x) word_idx = np.array([range(len(x_i)) for x_i in x]) return np.transpose(word_idx) class WordDropout(WordNoising): """Randomly drop input words. If not passing blank_idx (default is None), then dropped words will be removed. Otherwise, it will be replaced by the blank_idx.""" def __init__(self, dictionary, default_dropout_prob=0.1, bpe_cont_marker="@@", bpe_end_marker=None): super().__init__(dictionary, bpe_cont_marker, bpe_end_marker) self.default_dropout_prob = default_dropout_prob def noising(self, x, lengths, dropout_prob=None, blank_idx=None): if dropout_prob is None: dropout_prob = self.default_dropout_prob # x: (T x B), lengths: B if dropout_prob == 0: return x, lengths assert 0 < dropout_prob < 1 # be sure to drop entire words word_idx = self.get_word_idx(x) sentences = [] modified_lengths = [] for i in range(lengths.size(0)): # Since dropout probabilities need to apply over non-pad tokens, # it is not trivial to generate the keep mask without consider # input lengths; otherwise, this could be done outside the loop # We want to drop whole words based on word_idx grouping num_words = max(word_idx[:, i]) + 1 # ith example: [x0, x1, ..., eos, pad, ..., pad] # We should only generate keep probs for non-EOS tokens. Thus if the # input sentence ends in EOS, the last word idx is not included in # the dropout mask generation and we append True to always keep EOS. # Otherwise, just generate the dropout mask for all word idx # positions. has_eos = x[lengths[i] - 1, i] == self.dictionary.eos() if has_eos: # has eos? 
keep = np.random.rand(num_words - 1) >= dropout_prob keep = np.append(keep, [True]) # keep EOS symbol else: keep = np.random.rand(num_words) >= dropout_prob words = x[:lengths[i], i].tolist() # TODO: speed up the following loop # drop words from the input according to keep new_s = [ w if keep[word_idx[j, i]] else blank_idx for j, w in enumerate(words) ] new_s = [w for w in new_s if w is not None] # we need to have at least one word in the sentence (more than the # start / end sentence symbols) if len(new_s) <= 1: # insert at beginning in case the only token left is EOS # EOS should be at end of list. new_s.insert(0, words[np.random.randint(0, len(words))]) assert len(new_s) >= 1 and ( not has_eos # Either don't have EOS at end or last token is EOS or (len(new_s) >= 2 and new_s[-1] == self.dictionary.eos()) ), "New sentence is invalid." sentences.append(new_s) modified_lengths.append(len(new_s)) # re-construct input modified_lengths = torch.LongTensor(modified_lengths) modified_x = torch.LongTensor( modified_lengths.max(), modified_lengths.size(0) ).fill_(self.dictionary.pad()) for i in range(modified_lengths.size(0)): modified_x[:modified_lengths[i], i].copy_(torch.LongTensor(sentences[i])) return modified_x, modified_lengths class WordShuffle(WordNoising): """Shuffle words by no more than k positions.""" def __init__(self, dictionary, default_max_shuffle_distance=3, bpe_cont_marker="@@", bpe_end_marker=None): super().__init__(dictionary, bpe_cont_marker, bpe_end_marker) self.default_max_shuffle_distance = 3 def noising(self, x, lengths, max_shuffle_distance=None): if max_shuffle_distance is None: max_shuffle_distance = self.default_max_shuffle_distance # x: (T x B), lengths: B if max_shuffle_distance == 0: return x, lengths # max_shuffle_distance < 1 will return the same sequence assert max_shuffle_distance > 1 # define noise word scores noise = np.random.uniform( 0, max_shuffle_distance, size=(x.size(0), x.size(1)), ) noise[0] = -1 # do not move start sentence symbol # be sure to shuffle entire words word_idx = self.get_word_idx(x) x2 = x.clone() for i in range(lengths.size(0)): length_no_eos = lengths[i] if x[lengths[i] - 1, i] == self.dictionary.eos(): length_no_eos = lengths[i] - 1 # generate a random permutation scores = word_idx[:length_no_eos, i] + noise[word_idx[:length_no_eos, i], i] # ensure no reordering inside a word scores += 1e-6 * np.arange(length_no_eos) permutation = scores.argsort() # shuffle words x2[:length_no_eos, i].copy_( x2[:length_no_eos, i][torch.from_numpy(permutation)] ) return x2, lengths class UnsupervisedMTNoising(WordNoising): """ Implements the default configuration for noising in UnsupervisedMT (github.com/facebookresearch/UnsupervisedMT) """ def __init__( self, dictionary, max_word_shuffle_distance, word_dropout_prob, word_blanking_prob, bpe_cont_marker="@@", bpe_end_marker=None, ): super().__init__(dictionary) self.max_word_shuffle_distance = max_word_shuffle_distance self.word_dropout_prob = word_dropout_prob self.word_blanking_prob = word_blanking_prob self.word_dropout = WordDropout( dictionary=dictionary, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker, ) self.word_shuffle = WordShuffle( dictionary=dictionary, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker, ) def noising(self, x, lengths): # 1. Word Shuffle noisy_src_tokens, noisy_src_lengths = self.word_shuffle.noising( x=x, lengths=lengths, max_shuffle_distance=self.max_word_shuffle_distance, ) # 2. 
Word Dropout noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising( x=noisy_src_tokens, lengths=noisy_src_lengths, dropout_prob=self.word_dropout_prob, ) # 3. Word Blanking noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising( x=noisy_src_tokens, lengths=noisy_src_lengths, dropout_prob=self.word_blanking_prob, blank_idx=self.dictionary.unk(), ) return noisy_src_tokens class NoisingDataset(torch.utils.data.Dataset): def __init__( self, src_dataset, src_dict, seed, noiser=None, noising_class=UnsupervisedMTNoising, **kwargs ): """ Wrap a :class:`~torch.utils.data.Dataset` and apply noise to the samples based on the supplied noising configuration. Args: src_dataset (~torch.utils.data.Dataset): dataset to wrap. to build self.src_dataset -- a LanguagePairDataset with src dataset as the source dataset and None as the target dataset. Should NOT have padding so that src_lengths are accurately calculated by language_pair_dataset collate function. We use language_pair_dataset here to encapsulate the tgt_dataset so we can re-use the LanguagePairDataset collater to format the batches in the structure that SequenceGenerator expects. src_dict (~fairseq.data.Dictionary): source dictionary seed (int): seed to use when generating random noise noiser (WordNoising): a pre-initialized :class:`WordNoising` instance. If this is None, a new instance will be created using *noising_class* and *kwargs*. noising_class (class, optional): class to use to initialize a default :class:`WordNoising` instance. kwargs (dict, optional): arguments to initialize the default :class:`WordNoising` instance given by *noiser*. """ self.src_dataset = src_dataset self.src_dict = src_dict self.seed = seed self.noiser = noiser if noiser is not None else noising_class( dictionary=src_dict, **kwargs, ) def __getitem__(self, index): """ Returns a single noisy sample. Multiple samples are fed to the collater create a noising dataset batch. """ src_tokens = self.src_dataset[index] src_lengths = torch.LongTensor([len(src_tokens)]) src_tokens = src_tokens.unsqueeze(0) # Transpose src tokens to fit expected shape of x in noising function # (batch size, sequence length) -> (sequence length, batch size) src_tokens_t = torch.t(src_tokens) with data_utils.numpy_seed(self.seed + index): noisy_src_tokens = self.noiser.noising(src_tokens_t, src_lengths) # Transpose back to expected src_tokens format # (sequence length, 1) -> (1, sequence length) noisy_src_tokens = torch.t(noisy_src_tokens) return noisy_src_tokens[0] def __len__(self): """ The length of the noising dataset is the length of src. """ return len(self.src_dataset) @property def supports_prefetch(self): return self.src_dataset.supports_prefetch def prefetch(self, indices): if self.src_dataset.supports_prefetch: self.src_dataset.prefetch(indices)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/noising.py
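For context on how the classes in noising.py compose, the following is a minimal usage sketch, not taken from the repository: it assumes fairseq is importable, and the dictionary symbols and hyperparameter values are made up. The noisers expect token tensors laid out as (sequence length x batch size), which is why NoisingDataset transposes each sample before and after noising.

# Hypothetical usage sketch for UnsupervisedMTNoising; not part of noising.py.
import torch
from fairseq.data import Dictionary
from fairseq.data.noising import UnsupervisedMTNoising

d = Dictionary()
for symbol in ["he@@", "llo", "world"]:  # made-up BPE symbols
    d.add_symbol(symbol)

noiser = UnsupervisedMTNoising(
    dictionary=d,
    max_word_shuffle_distance=3,
    word_dropout_prob=0.1,
    word_blanking_prob=0.1,
)

# One sentence "he@@ llo world </s>", shaped (T x B) with B = 1.
x = torch.LongTensor(
    [[d.index("he@@")], [d.index("llo")], [d.index("world")], [d.eos()]]
)
lengths = torch.LongTensor([x.size(0)])
noisy = noiser.noising(x, lengths)  # returns a (T' x 1) LongTensor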
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np

from . import BaseWrapperDataset


class SubsampleDataset(BaseWrapperDataset):
    """Subsamples a given dataset by a specified ratio. Subsampling is done on
    the number of examples.

    Args:
        dataset (~torch.utils.data.Dataset): dataset to subsample
        size_ratio (float): the ratio to subsample to. must be between 0 and 1
            (exclusive)
    """

    def __init__(self, dataset, size_ratio):
        super().__init__(dataset)
        assert size_ratio < 1
        self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int)
        self.indices = np.random.choice(
            list(range(len(self.dataset))), self.actual_size, replace=False
        )
        print(
            "subsampled dataset from {} to {} (ratio={})".format(
                len(self.dataset), self.actual_size, size_ratio
            )
        )

    def __getitem__(self, index):
        return self.dataset[self.indices[index]]

    def __len__(self):
        return self.actual_size

    def collater(self, samples):
        return self.dataset.collater(samples)

    @property
    def sizes(self):
        return self.dataset.sizes[self.indices]

    @property
    def name(self):
        return self.dataset.name

    def num_tokens(self, index):
        return self.dataset.num_tokens(self.indices[index])

    def size(self, index):
        return self.dataset.size(self.indices[index])

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        order.append(self.sizes)
        return np.lexsort(order)

    def prefetch(self, indices):
        self.dataset.prefetch(self.indices[indices])
data2vec_vision-main
infoxlm/fairseq/fairseq/data/subsample_dataset.py
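As a rough illustration of SubsampleDataset, here is a hedged sketch, not from the repository; it assumes fairseq is importable and wraps a made-up ListDataset. Note that the subsampled indices are drawn once, at construction time, without replacement.

# Hypothetical sketch; the wrapped dataset and its sizes are made up.
import numpy as np
import torch
from fairseq.data import ListDataset, SubsampleDataset

items = [torch.LongTensor([i]) for i in range(10)]
base = ListDataset(items, sizes=np.ones(len(items), dtype=int))

subset = SubsampleDataset(base, size_ratio=0.5)  # prints the before/after sizes
print(len(subset))    # 5 examples, sampled without replacement
print(subset.sizes)   # the wrapped sizes re-indexed by the sampled indices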
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np

from . import BaseWrapperDataset


class SortDataset(BaseWrapperDataset):

    def __init__(self, dataset, sort_order):
        super().__init__(dataset)
        if not isinstance(sort_order, (list, tuple)):
            sort_order = [sort_order]
        self.sort_order = sort_order

        assert all(len(so) == len(dataset) for so in sort_order)

    def ordered_indices(self):
        return np.lexsort(self.sort_order)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/sort_dataset.py
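SortDataset defers entirely to np.lexsort, which sorts by the last key in sort_order first and breaks ties with the earlier keys. Below is a hedged sketch of the common fairseq pattern of passing a random shuffle followed by sentence lengths; the wrapped dataset and values are made up and not from the repository.

# Hypothetical sketch; the wrapped dataset, sizes, and shuffle order are made up.
import numpy as np
import torch
from fairseq.data import ListDataset, SortDataset

sizes = np.array([3, 1, 2, 1])
base = ListDataset([torch.zeros(int(n)) for n in sizes], sizes=sizes)

shuffle = np.random.permutation(len(base))
ordered = SortDataset(base, sort_order=[shuffle, sizes])
print(ordered.ordered_indices())  # sorted by sizes (last key), ties broken by shuffle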
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from . import data_utils, FairseqDataset


def collate(samples, pad_idx, eos_idx):
    if len(samples) == 0:
        return {}

    def merge(key, is_list=False):
        if is_list:
            res = []
            for i in range(len(samples[0][key])):
                res.append(data_utils.collate_tokens(
                    [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False,
                ))
            return res
        else:
            return data_utils.collate_tokens(
                [s[key] for s in samples], pad_idx, eos_idx, left_pad=False,
            )

    src_tokens = merge('source')
    if samples[0]['target'] is not None:
        is_target_list = isinstance(samples[0]['target'], list)
        target = merge('target', is_target_list)
    else:
        target = src_tokens

    return {
        'id': torch.LongTensor([s['id'] for s in samples]),
        'nsentences': len(samples),
        'ntokens': sum(len(s['source']) for s in samples),
        'net_input': {
            'src_tokens': src_tokens,
            'src_lengths': torch.LongTensor([
                s['source'].numel() for s in samples
            ]),
        },
        'target': target,
    }


class MonolingualDataset(FairseqDataset):
    """
    A wrapper around torch.utils.data.Dataset for monolingual data.

    Args:
        dataset (torch.utils.data.Dataset): dataset to wrap
        sizes (List[int]): sentence lengths
        vocab (~fairseq.data.Dictionary): vocabulary
        shuffle (bool, optional): shuffle the elements before batching
            (default: True).
    """

    def __init__(self, dataset, sizes, src_vocab, tgt_vocab, add_eos_for_other_targets, shuffle,
                 targets=None, add_bos_token=False):
        self.dataset = dataset
        self.sizes = np.array(sizes)
        self.vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.add_eos_for_other_targets = add_eos_for_other_targets
        self.shuffle = shuffle
        self.add_bos_token = add_bos_token

        assert targets is None or all(t in {'self', 'future', 'past'} for t in targets), \
            "targets must be none or one of 'self', 'future', 'past'"
        if targets is not None and len(targets) == 0:
            targets = None
        self.targets = targets

    def __getitem__(self, index):
        if self.targets is not None:
            # *future_target* is the original sentence
            # *source* is shifted right by 1 (maybe left-padded with eos)
            # *past_target* is shifted right by 2 (left-padded as needed)
            #
            # Left-to-right language models should condition on *source* and
            # predict *future_target*.
            # Right-to-left language models should condition on *source* and
            # predict *past_target*.
            source, future_target, past_target = self.dataset[index]
            source, target = self._make_source_target(source, future_target, past_target)
        else:
            source = self.dataset[index]
            target = None
        source, target = self._maybe_add_bos(source, target)
        return {'id': index, 'source': source, 'target': target}

    def __len__(self):
        return len(self.dataset)

    def _make_source_target(self, source, future_target, past_target):
        if self.targets is not None:
            target = []

            if self.add_eos_for_other_targets and (('self' in self.targets) or ('past' in self.targets)) \
                    and source[-1] != self.vocab.eos():
                # append eos at the end of source
                source = torch.cat([source, source.new([self.vocab.eos()])])

                if 'future' in self.targets:
                    future_target = torch.cat([future_target, future_target.new([self.vocab.pad()])])
                if 'past' in self.targets:
                    # first token is before the start of sentence which is only used in "none" break mode when
                    # add_eos_for_other_targets is False
                    past_target = torch.cat([past_target.new([self.vocab.pad()]), past_target[1:], source[-2, None]])

            for t in self.targets:
                if t == 'self':
                    target.append(source)
                elif t == 'future':
                    target.append(future_target)
                elif t == 'past':
                    target.append(past_target)
                else:
                    raise Exception('invalid target ' + t)

            if len(target) == 1:
                target = target[0]
        else:
            target = future_target

        return source, self._filter_vocab(target)

    def _maybe_add_bos(self, source, target):
        if self.add_bos_token:
            source = torch.cat([source.new([self.vocab.bos()]), source])
            if target is not None:
                target = torch.cat([target.new([self.tgt_vocab.bos()]), target])
        return source, target

    def _filter_vocab(self, target):
        if len(self.tgt_vocab) != len(self.vocab):
            def _filter(target):
                mask = target.ge(len(self.tgt_vocab))
                if mask.any():
                    target[mask] = self.tgt_vocab.unk()
                return target

            if isinstance(target, list):
                return [_filter(t) for t in target]
            return _filter(target)
        return target

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch with the following keys:

                - `id` (LongTensor): example IDs in the original input order
                - `ntokens` (int): total number of tokens in the batch
                - `net_input` (dict): the input to the Model, containing keys:

                  - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
                    the source sentence of shape `(bsz, src_len)`. Padding will
                    appear on the right.

                - `target` (LongTensor): a padded 2D Tensor of tokens in the
                  target sentence of shape `(bsz, tgt_len)`. Padding will
                  appear on the right.
        """
        return collate(samples, self.vocab.pad(), self.vocab.eos())

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return self.sizes[index]

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return self.sizes[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        order.append(self.sizes)
        return np.lexsort(order)

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/monolingual_dataset.py
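The comment in MonolingualDataset.__getitem__ describes three shifted views of each sentence. Here is a small worked example of that convention using made-up token IDs; it is illustrative only and does not call the class.

# Illustrative only; hypothetical token IDs for the sentence "w1 w2 w3 </s>".
eos, pad, w1, w2, w3 = 2, 1, 10, 11, 12

future_target = [w1, w2, w3, eos]   # the original sentence
source        = [eos, w1, w2, w3]   # shifted right by 1, left-padded with eos
past_target   = [pad, eos, w1, w2]  # shifted right by 2, left-padded as needed

# A left-to-right LM conditions on `source` and predicts `future_target`;
# a right-to-left LM conditions on `source` and predicts `past_target`.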
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from torch.utils.data.dataloader import default_collate

from . import FairseqDataset


class BaseWrapperDataset(FairseqDataset):

    def __init__(self, dataset):
        super().__init__()
        self.dataset = dataset

    def __getitem__(self, index):
        return self.dataset[index]

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        if hasattr(self.dataset, 'collater'):
            return self.dataset.collater(samples)
        else:
            return default_collate(samples)

    @property
    def sizes(self):
        return self.dataset.sizes

    def num_tokens(self, index):
        return self.dataset.num_tokens(index)

    def size(self, index):
        return self.dataset.size(index)

    def ordered_indices(self):
        return self.dataset.ordered_indices()

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)

    def set_epoch(self, epoch):
        super().set_epoch(epoch)
        if hasattr(self.dataset, 'set_epoch'):
            self.dataset.set_epoch(epoch)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/base_wrapper_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from . import BaseWrapperDataset


class NumelDataset(BaseWrapperDataset):

    def __init__(self, dataset, reduce=False):
        super().__init__(dataset)
        self.reduce = reduce

    def __getitem__(self, index):
        item = self.dataset[index]
        if torch.is_tensor(item):
            return torch.numel(item)
        else:
            return np.size(item)

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        if self.reduce:
            return sum(samples)
        else:
            return torch.tensor(samples)
data2vec_vision-main
infoxlm/fairseq/fairseq/data/numel_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .dictionary import Dictionary, TruncatedDictionary

from .fairseq_dataset import FairseqDataset, FairseqIterableDataset

from .base_wrapper_dataset import BaseWrapperDataset

from .append_token_dataset import AppendTokenDataset
from .audio.raw_audio_dataset import FileAudioDataset
from .backtranslation_dataset import BacktranslationDataset
from .colorize_dataset import ColorizeDataset
from .concat_dataset import ConcatDataset
from .concat_sentences_dataset import ConcatSentencesDataset
from .denoising_dataset import DenoisingDataset
from .id_dataset import IdDataset
from .indexed_dataset import IndexedCachedDataset, IndexedDataset, IndexedRawTextDataset, MMapIndexedDataset
from .language_pair_dataset import LanguagePairDataset
from .list_dataset import ListDataset
from .lm_context_window_dataset import LMContextWindowDataset
from .lru_cache_dataset import LRUCacheDataset
from .mask_tokens_dataset import MaskTokensDataset
from .monolingual_dataset import MonolingualDataset
from .nested_dictionary_dataset import NestedDictionaryDataset
from .noising import NoisingDataset
from .numel_dataset import NumelDataset
from .num_samples_dataset import NumSamplesDataset
from .offset_tokens_dataset import OffsetTokensDataset
from .pad_dataset import LeftPadDataset, PadDataset, RightPadDataset
from .prepend_dataset import PrependDataset
from .prepend_token_dataset import PrependTokenDataset
from .raw_label_dataset import RawLabelDataset
from .replace_dataset import ReplaceDataset
from .resampling_dataset import ResamplingDataset
from .roll_dataset import RollDataset
from .round_robin_zip_datasets import RoundRobinZipDatasets
from .sharded_dataset import ShardedDataset
from .sort_dataset import SortDataset
from .strip_token_dataset import StripTokenDataset
from .subsample_dataset import SubsampleDataset
from .token_block_dataset import TokenBlockDataset
from .transform_eos_dataset import TransformEosDataset
from .transform_eos_lang_pair_dataset import TransformEosLangPairDataset
from .truncate_dataset import TruncateDataset

from .iterators import (
    CountingIterator,
    EpochBatchIterator,
    GroupedIterator,
    ShardedIterator,
)

__all__ = [
    'AppendTokenDataset',
    'BacktranslationDataset',
    'BaseWrapperDataset',
    'ColorizeDataset',
    'ConcatDataset',
    'ConcatSentencesDataset',
    'CountingIterator',
    'DenoisingDataset',
    'Dictionary',
    'EpochBatchIterator',
    'FairseqDataset',
    'FairseqIterableDataset',
    'GroupedIterator',
    'IdDataset',
    'IndexedCachedDataset',
    'IndexedDataset',
    'IndexedRawTextDataset',
    'LanguagePairDataset',
    'LeftPadDataset',
    'ListDataset',
    'LMContextWindowDataset',
    'LRUCacheDataset',
    'MaskTokensDataset',
    'MMapIndexedDataset',
    'MonolingualDataset',
    'NestedDictionaryDataset',
    'NoisingDataset',
    'NumelDataset',
    'NumSamplesDataset',
    'OffsetTokensDataset',
    'PadDataset',
    'PrependDataset',
    'PrependTokenDataset',
    'ReplaceDataset',
    'RollDataset',
    'FileAudioDataset',
    'RawLabelDataset',
    'ResamplingDataset',
    'RightPadDataset',
    'RoundRobinZipDatasets',
    'ShardedDataset',
    'ShardedIterator',
    'SortDataset',
    'StripTokenDataset',
    'SubsampleDataset',
    'TokenBlockDataset',
    'TransformEosDataset',
    'TransformEosLangPairDataset',
    'TruncateDataset',
    'TruncatedDictionary',
]
data2vec_vision-main
infoxlm/fairseq/fairseq/data/__init__.py