python_code (string, lengths 0–992k) | repo_name (string, lengths 8–46) | file_path (string, lengths 5–162) |
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
class BeamableMM(nn.Module):
"""This module provides an optimized MM for beam decoding with attention.
    It leverages the fact that the source-side of the input is replicated beam
times and the target-side of the input is of width one. This layer speeds up
inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)}
with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}.
"""
def __init__(self, beam_size=None):
super(BeamableMM, self).__init__()
self.beam_size = beam_size
def forward(self, input1, input2):
        if (
            not self.training  # test mode
            and self.beam_size is not None  # beam size is set
            and input1.dim() == 3  # only support batched input
            and input1.size(1) == 1  # single time step update
        ):
bsz, beam = input1.size(0), self.beam_size
# bsz x 1 x nhu --> bsz/beam x beam x nhu
input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)
# bsz x sz2 x nhu --> bsz/beam x sz2 x nhu
input2 = input2.unfold(0, beam, beam)[:, :, :, 0]
# use non batched operation if bsz = beam
if input1.size(0) == 1:
output = torch.mm(input1[0, :, :], input2[0, :, :])
else:
output = input1.bmm(input2)
return output.view(bsz, 1, -1)
else:
return input1.bmm(input2)
def set_beam_size(self, beam_size):
self.beam_size = beam_size
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/beamable_mm.py |
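A minimal usage sketch for BeamableMM with illustrative names and shapes (the import path is assumed, not taken from the dump): in eval mode with beam_size=5, the beamed path should reproduce a plain batched matmul whenever the source-side tensor is identical within each block of beam rows.
import torch
from fairseq.modules import BeamableMM  # assumed import path

beam, sentences, nhu, src_len = 5, 2, 16, 7
bsz = sentences * beam
bmm = BeamableMM(beam_size=beam).eval()      # eval() so the beamable path is taken

x1 = torch.randn(bsz, 1, nhu)                # target side: one step per beam hypothesis
x2 = torch.randn(sentences, nhu, src_len)    # source side, one per sentence
x2 = x2.repeat_interleave(beam, dim=0)       # replicated beam times along the batch dim

out = bmm(x1, x2)                            # bsz x 1 x src_len
assert torch.allclose(out, x1.bmm(x2), atol=1e-6)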
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.scalar_bias import scalar_bias
class SingleHeadAttention(nn.Module):
"""
Single-head attention that supports Gating and Downsampling
"""
def __init__(
self,
out_channels,
embed_dim,
head_dim,
head_index,
dropout=0.0,
bias=True,
project_input=True,
gated=False,
downsample=False,
num_heads=1,
):
super().__init__()
self.embed_dim = embed_dim
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.head_index = head_index
self.head_dim = head_dim
self.project_input = project_input
self.gated = gated
self.downsample = downsample
self.num_heads = num_heads
self.projection = None
k_layers = []
v_layers = []
if self.downsample:
k_layers.append(Downsample(self.head_index))
v_layers.append(Downsample(self.head_index))
out_proj_size = self.head_dim
else:
out_proj_size = self.head_dim * self.num_heads
if self.gated:
k_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias))
self.in_proj_q = GatedLinear(self.embed_dim, out_proj_size, bias=bias)
v_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias))
else:
k_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias))
self.in_proj_q = Linear(self.embed_dim, out_proj_size, bias=bias)
v_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias))
self.in_proj_k = nn.Sequential(*k_layers)
self.in_proj_v = nn.Sequential(*v_layers)
if self.downsample:
self.out_proj = Linear(out_proj_size, self.head_dim, bias=bias)
else:
self.out_proj = Linear(out_proj_size, out_channels, bias=bias)
self.scaling = self.head_dim ** -0.5
def forward(
self,
query,
key,
value,
mask_future_timesteps=False,
key_padding_mask=None,
use_scalar_bias=False,
):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Future timesteps can be masked with the
`mask_future_timesteps` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
src_len, bsz, out_channels = key.size()
tgt_len = query.size(0)
assert list(query.size()) == [tgt_len, bsz, out_channels]
assert key.size() == value.size()
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.downsample:
size = bsz
else:
size = bsz * self.num_heads
k = key
v = value
q = query
if self.project_input:
q = self.in_proj_q(q)
k = self.in_proj_k(k)
v = self.in_proj_v(v)
src_len = k.size()[0]
q *= self.scaling
if not self.downsample:
q = q.view(tgt_len, size, self.head_dim)
k = k.view(src_len, size, self.head_dim)
v = v.view(src_len, size, self.head_dim)
q = q.transpose(0, 1)
k = k.transpose(0, 1)
v = v.transpose(0, 1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if mask_future_timesteps:
assert (
query.size() == key.size()
), "mask_future_timesteps only applies to self-attention"
attn_weights *= torch.tril(
attn_weights.data.new([1]).expand(tgt_len, tgt_len).clone(),
diagonal=-1,
)[:, :: self.head_index + 1 if self.downsample else 1].unsqueeze(0)
attn_weights += torch.triu(
attn_weights.data.new([-math.inf]).expand(tgt_len, tgt_len).clone(),
diagonal=0,
)[:, :: self.head_index + 1 if self.downsample else 1].unsqueeze(0)
tgt_size = tgt_len
if use_scalar_bias:
attn_weights = scalar_bias(attn_weights, 2)
v = scalar_bias(v, 1)
tgt_size += 1
if key_padding_mask is not None:
# don't attend to padding symbols
if key_padding_mask.max() > 0:
if self.downsample:
attn_weights = attn_weights.view(bsz, 1, tgt_len, src_len)
else:
attn_weights = attn_weights.view(
size, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
-math.inf,
)
attn_weights = attn_weights.view(size, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
attn_weights = self.dropout_module(attn_weights)
attn = torch.bmm(attn_weights, v)
if self.downsample:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.head_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim)
attn = self.out_proj(attn)
return attn, attn_weights
class DownsampledMultiHeadAttention(nn.ModuleList):
"""
Multi-headed attention with Gating and Downsampling
"""
def __init__(
self,
out_channels,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
project_input=True,
gated=False,
downsample=False,
):
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.downsample = downsample
self.gated = gated
self.project_input = project_input
assert self.head_dim * num_heads == embed_dim
if self.downsample:
attention_heads = []
for index in range(self.num_heads):
attention_heads.append(
SingleHeadAttention(
out_channels,
self.embed_dim,
self.head_dim,
index,
dropout,
bias,
self.project_input,
self.gated,
self.downsample,
self.num_heads,
)
)
super().__init__(modules=attention_heads)
self.out_proj = Linear(embed_dim, out_channels, bias=bias)
else:
# either we have a list of attention heads, or just one attention head
# if not being downsampled, we can do the heads with one linear layer instead of separate ones
super().__init__()
self.attention_module = SingleHeadAttention(
out_channels,
self.embed_dim,
self.head_dim,
1,
dropout,
bias,
self.project_input,
self.gated,
self.downsample,
self.num_heads,
)
def forward(
self,
query,
key,
value,
mask_future_timesteps=False,
key_padding_mask=None,
use_scalar_bias=False,
):
src_len, bsz, embed_dim = key.size()
tgt_len = query.size(0)
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
tgt_size = tgt_len
if use_scalar_bias:
tgt_size += 1
attn = []
attn_weights = []
if self.downsample:
for attention_head_number in range(self.num_heads):
# call the forward of each attention head
_attn, _attn_weight = self[attention_head_number](
query,
key,
value,
mask_future_timesteps,
key_padding_mask,
use_scalar_bias,
)
attn.append(_attn)
attn_weights.append(_attn_weight)
full_attn = torch.cat(attn, dim=2)
full_attn = self.out_proj(full_attn)
return full_attn, attn_weights[0].clone()
else:
_attn, _attn_weight = self.attention_module(
query,
key,
value,
mask_future_timesteps,
key_padding_mask,
use_scalar_bias,
)
attn.append(_attn)
attn_weights.append(_attn_weight)
full_attn = torch.cat(attn, dim=2)
full_attn_weights = torch.cat(attn_weights)
full_attn_weights = full_attn_weights.view(
bsz, self.num_heads, tgt_size, src_len
)
full_attn_weights = full_attn_weights.sum(dim=1) / self.num_heads
return full_attn, full_attn_weights
class Downsample(nn.Module):
"""
    Selects every (index + 1)-th element along the first dimension
"""
def __init__(self, index):
super().__init__()
self.index = index
def forward(self, x):
return x[:: self.index + 1]
def Linear(in_features, out_features, dropout=0.0, bias=True):
"""Weight-normalized Linear layer (input: B x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
m.bias.data.zero_()
return nn.utils.weight_norm(m)
def GatedLinear(in_features, out_features, dropout=0.0, bias=True):
"""Weight-normalized Linear layer (input: B x T x C) with interspersed GLU units"""
return nn.Sequential(
Linear(in_features, out_features * 4, dropout, bias),
nn.GLU(),
Linear(out_features * 2, out_features * 2, dropout, bias),
nn.GLU(),
Linear(out_features, out_features, dropout, bias),
)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/downsampled_multihead_attention.py |
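A small sketch of how DownsampledMultiHeadAttention might be called, with illustrative shapes (inputs are Time x Batch x Channel, as the SingleHeadAttention docstring states); the import path is assumed.
import torch
from fairseq.modules.downsampled_multihead_attention import (
    DownsampledMultiHeadAttention,  # assumed import path
)

tgt_len, src_len, bsz, embed_dim = 4, 6, 2, 32
attn = DownsampledMultiHeadAttention(
    out_channels=embed_dim, embed_dim=embed_dim, num_heads=4, dropout=0.0
)
query = torch.randn(tgt_len, bsz, embed_dim)
key = torch.randn(src_len, bsz, embed_dim)
value = torch.randn(src_len, bsz, embed_dim)

out, attn_weights = attn(query, key, value)
# out: tgt_len x bsz x out_channels, attn_weights: bsz x tgt_len x src_len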
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction="mean"):
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
return F.nll_loss(
lprobs,
target,
ignore_index=ignore_index,
reduction=reduction,
)
try:
import xentropy_cuda
from apex.contrib import xentropy
def cross_entropy(logits, target, ignore_index=-100, reduction="mean"):
if logits.device == torch.device("cpu"):
return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
else:
if not getattr(cross_entropy, "_has_logged_once", False):
logger.info("using fused cross entropy")
cross_entropy._has_logged_once = True
half_to_float = logits.dtype == torch.half
losses = xentropy.SoftmaxCrossEntropyLoss.apply(
logits,
target,
0.0,
ignore_index,
half_to_float,
)
if reduction == "sum":
return losses.sum()
elif reduction == "mean":
if ignore_index >= 0:
return losses.sum() / target.ne(ignore_index).sum()
else:
return losses.mean()
elif reduction == "none":
return losses
else:
raise NotImplementedError
except ImportError:
def cross_entropy(logits, target, ignore_index=-100, reduction="mean"):
return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/cross_entropy.py |
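A quick sketch of calling the cross_entropy helper above (on CPU it falls back to the pure PyTorch path); shapes and the import path are illustrative assumptions.
import torch
from fairseq.modules.cross_entropy import cross_entropy  # assumed import path

logits = torch.randn(8, 100)                # batch x vocab
target = torch.randint(0, 100, (8,))
loss = cross_entropy(logits, target, ignore_index=-100, reduction="mean")
print(loss.item())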
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
shared_attn=None
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = quant_noise(
nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
) if shared_attn is None else shared_attn.k_proj
self.v_proj = quant_noise(
nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
) if shared_attn is None else shared_attn.v_proj
self.q_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
) if shared_attn is None else shared_attn.q_proj
self.out_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
) if shared_attn is None else shared_attn.out_proj
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
if shared_attn is None:
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
q_lora=None,
v_lora=None
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
                assert (src_len, bsz) == value.shape[:2]
if (
False
and not self.onnx_trace
and not is_tpu # don't use PyTorch version on TPUs
and incremental_state is None
and not static_kv
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
):
assert key is not None and value is not None
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
if q_lora is not None:
q += q_lora(query)
if v_lora is not None:
v += v_lora(value) if value is not None else v_lora(query)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
if len(attn_mask.size()) < 3:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
if src_len > prev_key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask.float()
elif key_padding_mask is not None:
if src_len > key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = key_padding_mask.float()
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(
0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/multihead_attention.py |
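A minimal self-attention call on the MultiheadAttention module above, with illustrative sizes (inputs are Time x Batch x Channel); the import path is an assumption.
import torch
from fairseq.modules import MultiheadAttention  # assumed import path

tgt_len, bsz, embed_dim = 5, 2, 64
mha = MultiheadAttention(embed_dim, num_heads=4, dropout=0.0, self_attention=True)

x = torch.randn(tgt_len, bsz, embed_dim)
out, attn_weights = mha(query=x, key=x, value=x, need_weights=True)
# out: tgt_len x bsz x embed_dim
# attn_weights (averaged over heads): bsz x tgt_len x tgt_len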
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections.abc import Iterable
from itertools import repeat
import torch
import torch.nn as nn
def _pair(v):
if isinstance(v, Iterable):
assert len(v) == 2, "len(v) != 2"
return v
return tuple(repeat(v, 2))
def infer_conv_output_dim(conv_op, input_dim, sample_inchannel):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim)
# N x C x H x W
# N: sample_bsz, C: sample_inchannel, H: sample_seq_len, W: input_dim
x = conv_op(x)
# N x C x H x W
x = x.transpose(1, 2)
# N x H x C x W
bsz, seq = x.size()[:2]
per_channel_dim = x.size()[3]
# bsz: N, seq: H, CxW the rest
return x.contiguous().view(bsz, seq, -1).size(-1), per_channel_dim
class VGGBlock(torch.nn.Module):
"""
    VGG-motivated CNN module (https://arxiv.org/pdf/1409.1556.pdf)
Args:
in_channels: (int) number of input channels (typically 1)
out_channels: (int) number of output channels
        conv_kernel_size: convolution kernel size
pooling_kernel_size: the size of the pooling window to take a max over
num_conv_layers: (int) number of convolution layers
input_dim: (int) input dimension
conv_stride: the stride of the convolving kernel.
Can be a single number or a tuple (sH, sW) Default: 1
padding: implicit paddings on both sides of the input.
Can be a single number or a tuple (padH, padW). Default: None
layer_norm: (bool) if layer norm is going to be applied. Default: False
Shape:
Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
"""
def __init__(
self,
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim,
conv_stride=1,
padding=None,
layer_norm=False,
):
assert (
input_dim is not None
), "Need input_dim for LayerNorm and infer_conv_output_dim"
super(VGGBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv_kernel_size = _pair(conv_kernel_size)
self.pooling_kernel_size = _pair(pooling_kernel_size)
self.num_conv_layers = num_conv_layers
self.padding = (
tuple(e // 2 for e in self.conv_kernel_size)
if padding is None
else _pair(padding)
)
self.conv_stride = _pair(conv_stride)
self.layers = nn.ModuleList()
for layer in range(num_conv_layers):
conv_op = nn.Conv2d(
in_channels if layer == 0 else out_channels,
out_channels,
self.conv_kernel_size,
stride=self.conv_stride,
padding=self.padding,
)
self.layers.append(conv_op)
if layer_norm:
conv_output_dim, per_channel_dim = infer_conv_output_dim(
conv_op, input_dim, in_channels if layer == 0 else out_channels
)
self.layers.append(nn.LayerNorm(per_channel_dim))
input_dim = per_channel_dim
self.layers.append(nn.ReLU())
if self.pooling_kernel_size is not None:
pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True)
self.layers.append(pool_op)
self.total_output_dim, self.output_dim = infer_conv_output_dim(
pool_op, input_dim, out_channels
)
def forward(self, x):
for i, _ in enumerate(self.layers):
x = self.layers[i](x)
return x
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/vggblock.py |
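A short sketch instantiating the VGGBlock above on a batch of spectrogram-like features; the sizes and import path are illustrative assumptions.
import torch
from fairseq.modules.vggblock import VGGBlock  # assumed import path

block = VGGBlock(
    in_channels=1,
    out_channels=32,
    conv_kernel_size=3,
    pooling_kernel_size=2,
    num_conv_layers=2,
    input_dim=40,        # feature dimension, needed for LayerNorm / output-dim inference
    layer_norm=True,
)
x = torch.randn(10, 1, 100, 40)   # B x C x T x feat
y = block(x)                      # B x out_channels x ceil(T/2) x block.output_dim
print(y.shape, block.output_dim)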
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
if self.padding_idx is not None:
self.max_positions = self.num_embeddings - self.padding_idx - 1
else:
self.max_positions = self.num_embeddings
def forward(
self,
input: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
positions: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
assert (positions is None) or (
self.padding_idx is None
), "If positions is pre-computed then padding_idx should not be set."
if positions is None:
if incremental_state is not None:
# positions is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
positions = torch.zeros(
(1, 1), device=input.device, dtype=input.dtype
).fill_(int(self.padding_idx + input.size(1)))
else:
positions = utils.make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/learned_positional_embedding.py |
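A small sketch of the learned positional embedding above; the padding index, sizes, and import path are illustrative assumptions.
import torch
from fairseq.modules.learned_positional_embedding import (
    LearnedPositionalEmbedding,  # assumed import path
)

pad_idx, max_pos, dim = 1, 128, 16
emb = LearnedPositionalEmbedding(max_pos + pad_idx + 1, dim, padding_idx=pad_idx)

tokens = torch.tensor([[5, 6, 7, pad_idx]])   # bsz x seqlen, right-padded
pos_emb = emb(tokens)                         # bsz x seqlen x dim
print(pos_emb.shape, emb.max_positions)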
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class GradMultiply(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
return grad * ctx.scale, None
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/grad_multiply.py |
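A tiny sketch of GradMultiply: the forward pass returns the input values unchanged, while the backward pass scales the incoming gradient by the given factor (the import path is assumed).
import torch
from fairseq.modules.grad_multiply import GradMultiply  # assumed import path

x = torch.randn(3, requires_grad=True)
y = GradMultiply.apply(x, 0.5)    # same values as x
y.sum().backward()
print(x.grad)                     # 0.5 everywhere, instead of 1.0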
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Tuple
import torch
import torch.nn.functional as F
from fairseq.data import Dictionary
from torch import nn
CHAR_PAD_IDX = 0
CHAR_EOS_IDX = 257
logger = logging.getLogger(__name__)
class CharacterTokenEmbedder(torch.nn.Module):
def __init__(
self,
vocab: Dictionary,
filters: List[Tuple[int, int]],
char_embed_dim: int,
word_embed_dim: int,
highway_layers: int,
max_char_len: int = 50,
char_inputs: bool = False,
):
super(CharacterTokenEmbedder, self).__init__()
self.onnx_trace = False
self.embedding_dim = word_embed_dim
self.max_char_len = max_char_len
self.char_embeddings = nn.Embedding(257, char_embed_dim, padding_idx=0)
self.symbol_embeddings = nn.Parameter(torch.FloatTensor(2, word_embed_dim))
self.eos_idx, self.unk_idx = 0, 1
self.char_inputs = char_inputs
self.convolutions = nn.ModuleList()
for width, out_c in filters:
self.convolutions.append(
nn.Conv1d(char_embed_dim, out_c, kernel_size=width)
)
last_dim = sum(f[1] for f in filters)
self.highway = Highway(last_dim, highway_layers) if highway_layers > 0 else None
self.projection = nn.Linear(last_dim, word_embed_dim)
assert (
vocab is not None or char_inputs
), "vocab must be set if not using char inputs"
self.vocab = None
if vocab is not None:
self.set_vocab(vocab, max_char_len)
self.reset_parameters()
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def set_vocab(self, vocab, max_char_len):
word_to_char = torch.LongTensor(len(vocab), max_char_len)
truncated = 0
for i in range(len(vocab)):
if i < vocab.nspecial:
char_idxs = [0] * max_char_len
else:
chars = vocab[i].encode()
# +1 for padding
char_idxs = [c + 1 for c in chars] + [0] * (max_char_len - len(chars))
if len(char_idxs) > max_char_len:
truncated += 1
char_idxs = char_idxs[:max_char_len]
word_to_char[i] = torch.LongTensor(char_idxs)
if truncated > 0:
logger.info(
"truncated {} words longer than {} characters".format(
truncated, max_char_len
)
)
self.vocab = vocab
self.word_to_char = word_to_char
@property
def padding_idx(self):
return Dictionary().pad() if self.vocab is None else self.vocab.pad()
def reset_parameters(self):
nn.init.xavier_normal_(self.char_embeddings.weight)
nn.init.xavier_normal_(self.symbol_embeddings)
nn.init.xavier_uniform_(self.projection.weight)
nn.init.constant_(
self.char_embeddings.weight[self.char_embeddings.padding_idx], 0.0
)
nn.init.constant_(self.projection.bias, 0.0)
def forward(
self,
input: torch.Tensor,
):
if self.char_inputs:
chars = input.view(-1, self.max_char_len)
pads = chars[:, 0].eq(CHAR_PAD_IDX)
eos = chars[:, 0].eq(CHAR_EOS_IDX)
if eos.any():
if self.onnx_trace:
chars = torch.where(eos.unsqueeze(1), chars.new_zeros(1), chars)
else:
chars[eos] = 0
unk = None
else:
flat_words = input.view(-1)
chars = self.word_to_char[flat_words.type_as(self.word_to_char)].type_as(
input
)
pads = flat_words.eq(self.vocab.pad())
eos = flat_words.eq(self.vocab.eos())
unk = flat_words.eq(self.vocab.unk())
word_embs = self._convolve(chars)
if self.onnx_trace:
if pads.any():
word_embs = torch.where(
pads.unsqueeze(1), word_embs.new_zeros(1), word_embs
)
if eos.any():
word_embs = torch.where(
eos.unsqueeze(1), self.symbol_embeddings[self.eos_idx], word_embs
)
if unk is not None and unk.any():
word_embs = torch.where(
unk.unsqueeze(1), self.symbol_embeddings[self.unk_idx], word_embs
)
else:
if pads.any():
word_embs[pads] = 0
if eos.any():
word_embs[eos] = self.symbol_embeddings[self.eos_idx]
if unk is not None and unk.any():
word_embs[unk] = self.symbol_embeddings[self.unk_idx]
return word_embs.view(input.size()[:2] + (-1,))
def _convolve(
self,
char_idxs: torch.Tensor,
):
char_embs = self.char_embeddings(char_idxs)
char_embs = char_embs.transpose(1, 2) # BTC -> BCT
conv_result = []
for conv in self.convolutions:
x = conv(char_embs)
x, _ = torch.max(x, -1)
x = F.relu(x)
conv_result.append(x)
x = torch.cat(conv_result, dim=-1)
if self.highway is not None:
x = self.highway(x)
x = self.projection(x)
return x
class Highway(torch.nn.Module):
"""
A `Highway layer <https://arxiv.org/abs/1505.00387>`_.
    Adapted from the AllenNLP implementation.
"""
def __init__(self, input_dim: int, num_layers: int = 1):
super(Highway, self).__init__()
self.input_dim = input_dim
self.layers = nn.ModuleList(
[nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)]
)
self.activation = nn.ReLU()
self.reset_parameters()
def reset_parameters(self):
for layer in self.layers:
# As per comment in AllenNLP:
# We should bias the highway layer to just carry its input forward. We do that by
# setting the bias on `B(x)` to be positive, because that means `g` will be biased to
# be high, so we will carry the input forward. The bias on `B(x)` is the second half
# of the bias vector in each Linear layer.
nn.init.constant_(layer.bias[self.input_dim :], 1)
nn.init.constant_(layer.bias[: self.input_dim], 0)
nn.init.xavier_normal_(layer.weight)
def forward(self, x: torch.Tensor):
for layer in self.layers:
projection = layer(x)
proj_x, gate = projection.chunk(2, dim=-1)
proj_x = self.activation(proj_x)
gate = torch.sigmoid(gate)
x = gate * x + (gate.new_tensor([1]) - gate) * proj_x
return x
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/character_token_embedder.py |
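The Highway class above gates between a nonlinear projection and the identity; a minimal standalone call with illustrative sizes (import path assumed):
import torch
from fairseq.modules.character_token_embedder import Highway  # assumed import path

highway = Highway(input_dim=64, num_layers=2)
x = torch.randn(8, 64)
y = highway(x)          # shape preserved: 8 x 64
print(y.shape)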
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from typing import Any, Dict, List, Tuple, Union
import torch
import torch.utils.checkpoint as checkpoint
from fairseq import utils
def checkpoint_wrapper(m, offload_to_cpu=False):
"""
A friendlier wrapper for performing activation checkpointing.
Compared to the PyTorch version, this version:
- wraps an nn.Module, so that all subsequent calls will use checkpointing
- handles keyword arguments in the forward
- handles non-Tensor outputs from the forward
Usage::
checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True)
a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))
"""
# should I check whether original_forward has already been set?
assert not hasattr(
m, "precheckpoint_forward"
), "checkpoint function has already been applied?"
m.precheckpoint_forward = m.forward
m.forward = functools.partial(
_checkpointed_forward,
m.precheckpoint_forward, # original_forward
offload_to_cpu,
)
return m
def unwrap_checkpoint(m: torch.nn.Module):
"""
unwrap a module and its children from checkpoint_wrapper
"""
for module in m.modules():
if hasattr(module, "precheckpoint_forward"):
module.forward = module.precheckpoint_forward
del module.precheckpoint_forward
if hasattr(module, "old_deepcopy_method"):
module.__deepcopy__ = module.old_deepcopy_method
del module.old_deepcopy_method
return m
def _checkpointed_forward(original_forward, offload_to_cpu, *args, **kwargs):
# Autograd Functions in PyTorch work best with positional args, since
# the backward must return gradients (or None) for every input argument.
# We can flatten keyword arguments to make this easier.
kwarg_keys, flat_args = pack_kwargs(*args, **kwargs)
parent_ctx_dict = {"offload": offload_to_cpu}
output = CheckpointFunction.apply(
original_forward, parent_ctx_dict, kwarg_keys, *flat_args
)
if isinstance(output, torch.Tensor):
return output
else:
packed_non_tensor_outputs = parent_ctx_dict["packed_non_tensor_outputs"]
if packed_non_tensor_outputs:
output = unpack_non_tensors(output, packed_non_tensor_outputs)
return output
def pack_kwargs(*args, **kwargs) -> Tuple[List[str], List[Any]]:
"""
Usage::
kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4)
args, kwargs = unpack_kwargs(kwarg_keys, flat_args)
assert args == [1, 2]
assert kwargs == {"a": 3, "b": 4}
"""
kwarg_keys = []
flat_args = list(args)
for k, v in kwargs.items():
kwarg_keys.append(k)
flat_args.append(v)
return kwarg_keys, flat_args
def unpack_kwargs(
kwarg_keys: List[str], flat_args: List[Any]
) -> Tuple[List[Any], Dict[str, Any]]:
if len(kwarg_keys) == 0:
return flat_args, {}
args = flat_args[: -len(kwarg_keys)]
kwargs = {k: v for k, v in zip(kwarg_keys, flat_args[-len(kwarg_keys) :])}
return args, kwargs
def split_non_tensors(
mixed: Union[torch.Tensor, Tuple[Any]]
) -> Tuple[Tuple[torch.Tensor], Dict[str, List[Any]]]:
"""
Usage::
x = torch.Tensor([1])
y = torch.Tensor([2])
tensors, packed_non_tensors = split_non_tensors((x, y, None, 3))
recon = unpack_non_tensors(tensors, packed_non_tensors)
assert recon == (x, y, None, 3)
"""
if isinstance(mixed, torch.Tensor):
return (mixed,), None
tensors = []
packed_non_tensors = {"is_tensor": [], "objects": []}
for o in mixed:
if isinstance(o, torch.Tensor):
packed_non_tensors["is_tensor"].append(True)
tensors.append(o)
else:
packed_non_tensors["is_tensor"].append(False)
packed_non_tensors["objects"].append(o)
return tuple(tensors), packed_non_tensors
def unpack_non_tensors(
tensors: Tuple[torch.Tensor],
packed_non_tensors: Dict[str, List[Any]],
) -> Tuple[Any]:
if packed_non_tensors is None:
return tensors
assert isinstance(packed_non_tensors, dict)
mixed = []
is_tensor_list = packed_non_tensors["is_tensor"]
objects = packed_non_tensors["objects"]
assert len(tensors) + len(objects) == len(is_tensor_list)
obj_i = tnsr_i = 0
for is_tensor in is_tensor_list:
if is_tensor:
mixed.append(tensors[tnsr_i])
tnsr_i += 1
else:
mixed.append(objects[obj_i])
obj_i += 1
return tuple(mixed)
class CheckpointFunction(torch.autograd.Function):
"""Similar to the torch version, but support non-Tensor outputs.
The caller is expected to provide a dict (*parent_ctx_dict*) that will hold
the non-Tensor outputs. These should be combined with the Tensor *outputs*
by calling ``unpack_non_tensors``.
"""
@staticmethod
def forward(ctx, run_function, parent_ctx_dict, kwarg_keys, *args):
if torch.is_grad_enabled(): # grad may be disabled, e.g., during validation
checkpoint.check_backward_validity(args)
ctx.run_function = run_function
ctx.kwarg_keys = kwarg_keys
ctx.fwd_rng_state = utils.get_rng_state()
tensor_inputs, packed_non_tensor_inputs = split_non_tensors(args)
if parent_ctx_dict["offload"]:
ctx.fwd_device = tuple(x.device for x in tensor_inputs)
ctx.grad_requirements = tuple(x.requires_grad for x in tensor_inputs)
tensor_inputs = tuple(x.to(torch.device("cpu"), non_blocking=True) for x in tensor_inputs)
else:
ctx.fwd_device, ctx.grad_requirements = None, None
ctx.save_for_backward(*tensor_inputs)
ctx.packed_non_tensor_inputs = packed_non_tensor_inputs
with torch.no_grad():
unpacked_args, unpacked_kwargs = unpack_kwargs(kwarg_keys, args)
outputs = run_function(*unpacked_args, **unpacked_kwargs)
if isinstance(outputs, torch.Tensor):
return outputs
else:
# Autograd Functions don't like non-Tensor outputs. We can split the
# non-Tensor and Tensor outputs, returning the former by reference
# through *parent_ctx_dict* and returning the latter directly.
outputs, packed_non_tensor_outputs = split_non_tensors(outputs)
parent_ctx_dict["packed_non_tensor_outputs"] = packed_non_tensor_outputs
return outputs
@staticmethod
def backward(ctx, *args):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
"Checkpointing is not compatible with .grad(), please use .backward() if possible"
)
tensor_inputs: Tuple = ctx.saved_tensors
tensor_inputs = checkpoint.detach_variable(tensor_inputs)
if ctx.fwd_device is not None:
tensor_inputs = [
t.to(ctx.fwd_device[i], non_blocking=True) for i, t in enumerate(tensor_inputs)
]
for i, need_grad in enumerate(ctx.grad_requirements):
tensor_inputs[i].requires_grad = need_grad
inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs)
# Store the current states.
bwd_rng_state = utils.get_rng_state()
# Set the states to what it used to be before the forward pass.
utils.set_rng_state(ctx.fwd_rng_state)
with torch.enable_grad():
unpacked_args, unpacked_kwargs = unpack_kwargs(ctx.kwarg_keys, inputs)
outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs)
tensor_outputs, _ = split_non_tensors(outputs)
# Set the states back to what it was at the start of this function.
utils.set_rng_state(bwd_rng_state)
# Run backward() with only Tensors that require grad
outputs_with_grad = []
args_with_grad = []
for i in range(len(tensor_outputs)):
if tensor_outputs[i].requires_grad:
outputs_with_grad.append(tensor_outputs[i])
args_with_grad.append(args[i])
if len(outputs_with_grad) == 0:
raise RuntimeError(
"None of the outputs have requires_grad=True, "
"this checkpoint() is not necessary"
)
torch.autograd.backward(outputs_with_grad, args_with_grad)
grads = tuple(
inp.grad if isinstance(inp, torch.Tensor) else None for inp in inputs
)
return (None, None, None) + grads
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/checkpoint_activations.py |
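A minimal sketch of checkpoint_wrapper around a small module, assuming the import path below and that fairseq's RNG-state helpers are available (CheckpointFunction above relies on them); activations of the wrapped module are recomputed during backward.
import torch
import torch.nn as nn
from fairseq.modules.checkpoint_activations import checkpoint_wrapper  # assumed import path

layer = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 16))
layer = checkpoint_wrapper(layer, offload_to_cpu=False)

x = torch.randn(4, 16, requires_grad=True)
loss = layer(x).sum()     # forward runs under no_grad; recomputed in backward
loss.backward()
print(x.grad.shape)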
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.modules import TransformerSentenceEncoderLayer
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
"""
    Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention)
"""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
export: bool = False,
is_bidirectional: bool = True,
stride: int = 32,
expressivity: int = 8,
) -> None:
super().__init__(
embedding_dim,
ffn_embedding_dim,
num_attention_heads,
dropout,
attention_dropout,
activation_dropout,
activation_fn,
export,
)
self.self_attn = SparseMultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
add_bias_kv=False,
add_zero_attn=False,
self_attention=True,
is_bidirectional=is_bidirectional,
stride=stride,
expressivity=expressivity,
)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/sparse_transformer_sentence_encoder_layer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import operator
import torch
import torch.nn.functional as F
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import nn
class TiedLinear(nn.Module):
def __init__(self, weight, transpose):
super().__init__()
self.weight = weight
self.transpose = transpose
def forward(self, input):
return F.linear(input, self.weight.t() if self.transpose else self.weight)
class TiedHeadModule(nn.Module):
def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size):
super().__init__()
tied_emb, _ = weights
self.num_words, emb_dim = tied_emb.size()
self.word_proj = quant_noise(
TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size
)
if input_dim != emb_dim:
self.word_proj = nn.Sequential(
quant_noise(
nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size
),
self.word_proj,
)
self.class_proj = quant_noise(
nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size
)
self.out_dim = self.num_words + num_classes
self.register_buffer("_float_tensor", torch.FloatTensor(1))
def forward(self, input):
inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1)
out = self._float_tensor.new(inp_sz, self.out_dim)
out[:, : self.num_words] = self.word_proj(input.view(inp_sz, -1))
out[:, self.num_words :] = self.class_proj(input.view(inp_sz, -1))
return out
class AdaptiveSoftmax(nn.Module):
"""
This is an implementation of the efficient softmax approximation for
graphical processing units (GPU), described in the paper "Efficient softmax
approximation for GPUs" (http://arxiv.org/abs/1609.04309).
"""
def __init__(
self,
vocab_size,
input_dim,
cutoff,
dropout,
factor=4.0,
adaptive_inputs=None,
tie_proj=False,
q_noise=0,
qn_block_size=8,
):
super().__init__()
if vocab_size > cutoff[-1]:
cutoff = cutoff + [vocab_size]
else:
assert (
vocab_size == cutoff[-1]
), "cannot specify cutoff larger than vocab size"
output_dim = cutoff[0] + len(cutoff) - 1
self.vocab_size = vocab_size
self.cutoff = cutoff
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.input_dim = input_dim
self.factor = factor
self.q_noise = q_noise
self.qn_block_size = qn_block_size
self.lsm = nn.LogSoftmax(dim=1)
if adaptive_inputs is not None:
self.head = TiedHeadModule(
adaptive_inputs.weights_for_band(0),
input_dim,
len(cutoff) - 1,
self.q_noise,
self.qn_block_size,
)
else:
self.head = quant_noise(
nn.Linear(input_dim, output_dim, bias=False),
self.q_noise,
self.qn_block_size,
)
self._make_tail(adaptive_inputs, tie_proj)
def init_weights(m):
if (
hasattr(m, "weight")
and not isinstance(m, TiedLinear)
and not isinstance(m, TiedHeadModule)
):
nn.init.xavier_uniform_(m.weight)
self.apply(init_weights)
self.register_buffer("version", torch.LongTensor([1]))
def _make_tail(self, adaptive_inputs=None, tie_proj=False):
self.tail = nn.ModuleList()
for i in range(len(self.cutoff) - 1):
dim = int(self.input_dim // self.factor ** (i + 1))
tied_emb, tied_proj = (
adaptive_inputs.weights_for_band(i + 1)
if adaptive_inputs is not None
else (None, None)
)
if tied_proj is not None:
if tie_proj:
proj = quant_noise(
TiedLinear(tied_proj, transpose=True),
self.q_noise,
self.qn_block_size,
)
else:
proj = quant_noise(
nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False),
self.q_noise,
self.qn_block_size,
)
else:
proj = quant_noise(
nn.Linear(self.input_dim, dim, bias=False),
self.q_noise,
self.qn_block_size,
)
if tied_emb is None:
out_proj = nn.Linear(
dim, self.cutoff[i + 1] - self.cutoff[i], bias=False
)
else:
out_proj = TiedLinear(tied_emb, transpose=False)
m = nn.Sequential(
proj,
nn.Dropout(self.dropout_module.p),
quant_noise(out_proj, self.q_noise, self.qn_block_size),
)
self.tail.append(m)
def upgrade_state_dict_named(self, state_dict, name):
version_name = name + ".version"
if version_name not in state_dict:
raise Exception("This version of the model is no longer supported")
def adapt_target(self, target):
"""
In order to be efficient, the AdaptiveSoftMax does not compute the
scores for all the word of the vocabulary for all the examples. It is
thus necessary to call the method adapt_target of the AdaptiveSoftMax
layer inside each forward pass.
"""
target = target.view(-1)
new_target = [target.clone()]
target_idxs = []
for i in range(len(self.cutoff) - 1):
mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))
new_target[0][mask] = self.cutoff[0] + i
if mask.any():
target_idxs.append(mask.nonzero(as_tuple=False).squeeze(1))
new_target.append(target[mask].add(-self.cutoff[i]))
else:
target_idxs.append(None)
new_target.append(None)
return new_target, target_idxs
def forward(self, input, target):
"""
Args:
input: (b x t x d)
target: (b x t)
Returns:
2 lists: output for each cutoff section and new targets by cut off
"""
input = input.contiguous().view(-1, input.size(-1))
input = self.dropout_module(input)
new_target, target_idxs = self.adapt_target(target)
output = [self.head(input).float()] # convert to fp32
for i in range(len(target_idxs)):
if target_idxs[i] is not None:
output.append(self.tail[i](input.index_select(0, target_idxs[i])).float()) # convert to fp32
else:
output.append(None)
return output, new_target
def get_log_prob(self, input, target):
"""
Computes the log probabilities for all the words of the vocabulary,
given a 2D tensor of hidden vectors.
"""
bsz, length, dim = input.size()
input = input.contiguous().view(-1, dim)
if target is not None:
_, target_idxs = self.adapt_target(target)
else:
target_idxs = None
head_y = self.head(input)
log_probs = head_y.new_zeros(input.size(0), self.vocab_size)
head_sz = self.cutoff[0] + len(self.tail)
log_probs[:, :head_sz] = self.lsm(head_y)
tail_priors = log_probs[:, self.cutoff[0] : head_sz].clone()
for i in range(len(self.tail)):
start = self.cutoff[i]
end = self.cutoff[i + 1]
if target_idxs is None:
tail_out = log_probs[:, start:end]
tail_out.copy_(self.tail[i](input))
log_probs[:, start:end] = self.lsm(tail_out).add_(
tail_priors[:, i, None]
)
elif target_idxs[i] is not None:
idxs = target_idxs[i]
tail_out = log_probs[idxs, start:end]
tail_out.copy_(self.tail[i](input[idxs]))
log_probs[idxs, start:end] = self.lsm(tail_out).add_(
tail_priors[idxs, i, None]
)
log_probs = log_probs.view(bsz, length, -1)
return log_probs
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/adaptive_softmax.py |
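A sketch of how the AdaptiveSoftmax above might be driven during training and for full-vocabulary scoring; the vocabulary size, cutoffs, and import path are illustrative assumptions.
import torch
from fairseq.modules.adaptive_softmax import AdaptiveSoftmax  # assumed import path

vocab_size, dim = 1000, 64
asm = AdaptiveSoftmax(vocab_size, dim, cutoff=[100, 500], dropout=0.0)

hidden = torch.randn(2, 7, dim)                    # b x t x d
target = torch.randint(0, vocab_size, (2, 7))      # b x t

outputs, new_targets = asm(hidden, target)         # per-cluster outputs and remapped targets
log_probs = asm.get_log_prob(hidden, target=None)  # 2 x 7 x vocab_size, full distribution
print(log_probs.shape)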
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn.modules.utils import _single
from torch import Tensor
class ConvTBC(torch.nn.Module):
"""1D convolution over an input of shape (time x batch x channel)
The implementation uses gemm to perform the convolution. This implementation
is faster than cuDNN for small kernel sizes.
"""
def __init__(self, in_channels, out_channels, kernel_size, padding=0):
super(ConvTBC, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _single(kernel_size)
self.padding = _single(padding)
self.weight = torch.nn.Parameter(
torch.Tensor(self.kernel_size[0], in_channels, out_channels)
)
self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_normal_(self.weight)
nn.init.zeros_(self.bias)
def conv_tbc(self, input: Tensor):
return torch.conv_tbc(
input.contiguous(), self.weight, self.bias, self.padding[0]
)
def forward(self, input: Tensor):
return self.conv_tbc(input)
def __repr__(self):
s = (
"{name}({in_channels}, {out_channels}, kernel_size={kernel_size}"
", padding={padding}"
)
if self.bias is None:
s += ", bias=False"
s += ")"
return s.format(name=self.__class__.__name__, **self.__dict__)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/conv_tbc.py |
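A short sketch of ConvTBC on a time-major input; with padding = (kernel_size - 1) // 2 the time dimension is preserved for odd kernel sizes (import path assumed).
import torch
from fairseq.modules.conv_tbc import ConvTBC  # assumed import path

conv = ConvTBC(in_channels=32, out_channels=64, kernel_size=3, padding=1)
x = torch.randn(10, 4, 32)    # T x B x C
y = conv(x)                   # 10 x 4 x 64
print(y.shape)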
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn.functional as F
def unfold1d(x, kernel_size, padding_l, pad_value=0):
"""unfold T x B x C to T x B x C x K"""
if kernel_size > 1:
T, B, C = x.size()
x = F.pad(
x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value
)
x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C))
else:
x = x.unsqueeze(3)
return x
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/unfold.py |
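A sketch of unfold1d producing causal windows: with padding_l = kernel_size - 1, the last slot of each window holds the current timestep (import path assumed).
import torch
from fairseq.modules.unfold import unfold1d  # assumed import path

T, B, C, K = 7, 2, 8, 3
x = torch.randn(T, B, C)
windows = unfold1d(x, kernel_size=K, padding_l=K - 1)   # T x B x C x K
assert windows.shape == (T, B, C, K)
assert torch.equal(windows[..., -1], x)                 # last slot is the current frame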
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch
import sys
from fairseq import utils
from fairseq.distributed import utils as distributed_utils
from fairseq.modules.layer_norm import LayerNorm
class BaseLayer(nn.Module):
def __init__(self, args):
super().__init__()
self.num_workers = distributed_utils.get_data_parallel_world_size()
expert_centroids = torch.empty(self.num_workers, args.decoder_embed_dim)
torch.nn.init.orthogonal_(expert_centroids, gain=0.1)
self.register_parameter("expert_centroids", torch.nn.Parameter(expert_centroids))
self.expert_network = nn.Sequential(*([BaseSublayer(args) for _ in range(args.base_sublayers)]))
self.expert_id = distributed_utils.get_data_parallel_rank()
self.shuffle = args.base_shuffle
self.cpp = self.load_assignment()
# Add a special attribute to the expert parameters, so we know not to sync their gradients
for param in self.expert_network.parameters():
param.expert = True
def forward(self, input_features, *args, **kwargs):
features = input_features.reshape(-1, input_features.size(-1))
is_training = input_features.requires_grad
if self.shuffle and is_training:
# Send each token to a random worker, to break correlations within the batch
shuffle_sort = torch.randperm(features.size(0), device=features.device)
features = All2All.apply(features[shuffle_sort])
with torch.no_grad():
# Compute similarity of each token to each expert, for routing
token_expert_affinities = features.matmul(self.expert_centroids.transpose(0, 1))
# Compute which token goes to which expert
sort_by_expert, input_splits, output_splits = self.balanced_assignment(token_expert_affinities) \
if is_training else self.greedy_assignment(token_expert_affinities)
# Swap these tokens for the right ones for our expert
routed_features = All2All.apply(features[sort_by_expert], output_splits, input_splits)
if routed_features.size(0) > 0:
# Mix in the expert network based on how appropriate it is for these tokens
alpha = torch.sigmoid(routed_features.mv(self.expert_centroids[self.expert_id])).unsqueeze(1)
routed_features = alpha * self.expert_network(routed_features) + (1 - alpha) * routed_features
# Return to original worker and ordering
result = All2All.apply(routed_features, input_splits, output_splits)[self.inverse_sort(sort_by_expert)]
if self.shuffle and is_training:
# Undo shuffling
result = All2All.apply(result)[self.inverse_sort(shuffle_sort)]
# Return additional Nones for compatibility with TransformerDecoderLayer
return result.view(input_features.size()), None, None
def inverse_sort(self, order):
# Creates an index that undoes a sort: xs==xs[order][inverse_sort(order)]
return torch.empty_like(order).scatter_(0, order, torch.arange(0, order.size(0), device=order.device))
def balanced_assignment(self, scores):
ok = scores.isfinite()
if not ok.all():
# NaNs here can break the assignment algorithm
scores[~ok] = scores[ok].min()
return self.cpp.balanced_assignment(scores), None, None
# Assigns each token to the top k experts
def greedy_assignment(self, scores, k=1):
token_to_workers = torch.topk(scores, dim=1, k=k, largest=True).indices.view(-1)
token_to_workers, sort_ordering = torch.sort(token_to_workers)
worker2token = sort_ordering // k
# Find how many tokens we're sending to each other worker (being careful for sending 0 tokens to some workers)
output_splits = torch.zeros((self.num_workers,), dtype=torch.long, device=scores.device)
workers, counts = torch.unique_consecutive(token_to_workers, return_counts=True)
output_splits[workers] = counts
# Tell other workers how many tokens to expect from us
input_splits = All2All.apply(output_splits)
return worker2token, input_splits.tolist(), output_splits.tolist()
def load_assignment(self):
try:
from fairseq import libbase
return libbase
except ImportError as e:
sys.stderr.write(
"ERROR: missing libbase. run `python setup.py build_ext --inplace`\n"
)
raise e
class BaseSublayer(nn.Module):
def __init__(self, args):
super().__init__()
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, 'activation_fn', 'relu') or "relu"
)
self.norm = LayerNorm(args.decoder_embed_dim, export=False)
self.ff1 = torch.nn.Linear(args.decoder_embed_dim, args.decoder_ffn_embed_dim)
self.ff2 = torch.nn.Linear(args.decoder_ffn_embed_dim, args.decoder_embed_dim)
self.ff2.weight.data.zero_()
def forward(self, xs):
return xs + self.ff2(self.activation_fn(self.ff1(self.norm(xs))))
# Wraps torch.distributed.all_to_all_single as a function that supports autograd
class All2All(torch.autograd.Function):
@staticmethod
def forward(ctx, xs, input_splits=None, output_splits=None):
ctx.input_splits = input_splits
ctx.output_splits = output_splits
ys = torch.empty_like(xs) if output_splits is None else \
xs.new_empty(size=[sum(output_splits)] + list(xs.size()[1:]))
torch.distributed.all_to_all_single(ys, xs, output_split_sizes=output_splits, input_split_sizes=input_splits)
return ys
@staticmethod
def backward(ctx, grad_output):
result = torch.empty_like(grad_output) if ctx.input_splits is None else \
grad_output.new_empty(size=[sum(ctx.input_splits)] + list(grad_output.size()[1:]))
torch.distributed.all_to_all_single(result, grad_output,
output_split_sizes=ctx.input_splits, input_split_sizes=ctx.output_splits)
return result, None, None
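# Illustrative check (not part of the original module). A minimal, single-process
# sketch of the scatter trick used by BaseLayer.inverse_sort: it builds an index
# that undoes a sort, so that xs == xs[order][inverse], without needing any
# torch.distributed setup.
if __name__ == "__main__":
    xs = torch.tensor([3.0, 1.0, 2.0, 0.0])
    order = xs.argsort()
    inverse = torch.empty_like(order).scatter_(
        0, order, torch.arange(0, order.size(0), device=order.device)
    )
    assert torch.equal(xs, xs[order][inverse])
    print("inverse_sort identity holds")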
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/base_layer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file re-implements the low-rank and beam approximation of the CRF layer
Proposed by:
Sun, Zhiqing, et al.
Fast Structured Decoding for Sequence Models
https://arxiv.org/abs/1910.11555
The CRF implementation is mainly borrowed from
https://github.com/kmkurn/pytorch-crf/blob/master/torchcrf/__init__.py
"""
import numpy as np
import torch
import torch.nn as nn
def logsumexp(x, dim=1):
return torch.logsumexp(x.float(), dim=dim).type_as(x)
class DynamicCRF(nn.Module):
"""Dynamic CRF layer is used to approximate the traditional
Conditional Random Fields (CRF)
$P(y | x) = 1/Z(x) exp(sum_i s(y_i, x) + sum_i t(y_{i-1}, y_i, x))$
where in this function, we assume the emition scores (s) are given,
and the transition score is a |V| x |V| matrix $M$
in the following two aspects:
(1) it used a low-rank approximation for the transition matrix:
$M = E_1 E_2^T$
(2) it used a beam to estimate the normalizing factor Z(x)
"""
def __init__(self, num_embedding, low_rank=32, beam_size=64):
super().__init__()
self.E1 = nn.Embedding(num_embedding, low_rank)
self.E2 = nn.Embedding(num_embedding, low_rank)
self.vocb = num_embedding
self.rank = low_rank
self.beam = beam_size
def extra_repr(self):
return "vocab_size={}, low_rank={}, beam_size={}".format(
self.vocb, self.rank, self.beam
)
def forward(self, emissions, targets, masks, beam=None):
"""
Compute the conditional log-likelihood of a sequence of target tokens given emission scores
Args:
            emissions (`~torch.Tensor`): Emission scores, usually the unnormalized decoder output,
                of shape ``(batch_size, seq_len, vocab_size)``. We assume batch-first.
            targets (`~torch.LongTensor`): Sequence of target token indices
                of shape ``(batch_size, seq_len)``
masks (`~torch.ByteTensor`): Mask tensor with the same size as targets
Returns:
`~torch.Tensor`: approximated log-likelihood
"""
numerator = self._compute_score(emissions, targets, masks)
denominator = self._compute_normalizer(emissions, targets, masks, beam)
return numerator - denominator
def forward_decoder(self, emissions, masks=None, beam=None):
"""
Find the most likely output sequence using Viterbi algorithm.
Args:
            emissions (`~torch.Tensor`): Emission scores, usually the unnormalized decoder output,
                of shape ``(batch_size, seq_len, vocab_size)``. We assume batch-first.
masks (`~torch.ByteTensor`): Mask tensor with the same size as targets
Returns:
`~torch.LongTensor`: decoded sequence from the CRF model
"""
return self._viterbi_decode(emissions, masks, beam)
def _compute_score(self, emissions, targets, masks=None):
batch_size, seq_len = targets.size()
emission_scores = emissions.gather(2, targets[:, :, None])[:, :, 0] # B x T
transition_scores = (self.E1(targets[:, :-1]) * self.E2(targets[:, 1:])).sum(2)
scores = emission_scores
scores[:, 1:] += transition_scores
if masks is not None:
scores = scores * masks.type_as(scores)
return scores.sum(-1)
def _compute_normalizer(self, emissions, targets=None, masks=None, beam=None):
# HACK: we include "target" which is a hueristic for training
# HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?)
beam = beam if beam is not None else self.beam
batch_size, seq_len = emissions.size()[:2]
if targets is not None:
            _emissions = emissions.scatter(2, targets[:, :, None], float("inf"))
beam_targets = _emissions.topk(beam, 2)[1]
beam_emission_scores = emissions.gather(2, beam_targets)
else:
beam_emission_scores, beam_targets = emissions.topk(beam, 2)
beam_transition_score1 = self.E1(beam_targets[:, :-1]) # B x (T-1) x K x D
beam_transition_score2 = self.E2(beam_targets[:, 1:]) # B x (T-1) x K x D
beam_transition_matrix = torch.bmm(
beam_transition_score1.view(-1, beam, self.rank),
beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2),
)
beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam)
# compute the normalizer in the log-space
score = beam_emission_scores[:, 0] # B x K
for i in range(1, seq_len):
next_score = score[:, :, None] + beam_transition_matrix[:, i - 1]
next_score = logsumexp(next_score, dim=1) + beam_emission_scores[:, i]
if masks is not None:
score = torch.where(masks[:, i : i + 1], next_score, score)
else:
score = next_score
# Sum (log-sum-exp) over all possible tags
return logsumexp(score, dim=1)
def _viterbi_decode(self, emissions, masks=None, beam=None):
# HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?)
beam = beam if beam is not None else self.beam
batch_size, seq_len = emissions.size()[:2]
beam_emission_scores, beam_targets = emissions.topk(beam, 2)
beam_transition_score1 = self.E1(beam_targets[:, :-1]) # B x (T-1) x K x D
beam_transition_score2 = self.E2(beam_targets[:, 1:]) # B x (T-1) x K x D
beam_transition_matrix = torch.bmm(
beam_transition_score1.view(-1, beam, self.rank),
beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2),
)
beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam)
traj_tokens, traj_scores = [], []
finalized_tokens, finalized_scores = [], []
# compute the normalizer in the log-space
score = beam_emission_scores[:, 0] # B x K
dummy = (
torch.arange(beam, device=score.device).expand(*score.size()).contiguous()
)
for i in range(1, seq_len):
traj_scores.append(score)
_score = score[:, :, None] + beam_transition_matrix[:, i - 1]
_score, _index = _score.max(dim=1)
_score = _score + beam_emission_scores[:, i]
if masks is not None:
score = torch.where(masks[:, i : i + 1], _score, score)
index = torch.where(masks[:, i : i + 1], _index, dummy)
else:
score, index = _score, _index
traj_tokens.append(index)
# now running the back-tracing and find the best
best_score, best_index = score.max(dim=1)
finalized_tokens.append(best_index[:, None])
finalized_scores.append(best_score[:, None])
for idx, scs in zip(reversed(traj_tokens), reversed(traj_scores)):
previous_index = finalized_tokens[-1]
finalized_tokens.append(idx.gather(1, previous_index))
finalized_scores.append(scs.gather(1, previous_index))
finalized_tokens.reverse()
finalized_tokens = torch.cat(finalized_tokens, 1)
finalized_tokens = beam_targets.gather(2, finalized_tokens[:, :, None])[:, :, 0]
finalized_scores.reverse()
finalized_scores = torch.cat(finalized_scores, 1)
finalized_scores[:, 1:] = finalized_scores[:, 1:] - finalized_scores[:, :-1]
return finalized_scores, finalized_tokens
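# Illustrative usage sketch (not part of the original module). It exercises the
# DynamicCRF API with arbitrary toy sizes: the approximate log-likelihood of
# random targets, and decoding with the beam Viterbi search.
if __name__ == "__main__":
    torch.manual_seed(0)
    vocab, batch, length = 20, 2, 6
    crf = DynamicCRF(num_embedding=vocab, low_rank=8, beam_size=4)
    emissions = torch.randn(batch, length, vocab)
    targets = torch.randint(0, vocab, (batch, length))
    masks = torch.ones(batch, length, dtype=torch.bool)
    log_likelihood = crf(emissions, targets, masks)  # shape: (batch,)
    scores, tokens = crf.forward_decoder(emissions, masks)  # tokens: (batch, length)
    print(log_likelihood.shape, tokens.shape)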
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/dynamic_crf_layer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with
the corresponding GitHub repo: https://github.com/hendrycks/GELUs
"""
import math
import torch
import torch.nn as nn
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
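# Illustrative comparison (not part of the original module). gelu wraps the exact
# erf-based F.gelu, while gelu_accurate is the tanh approximation; the two agree
# to within a small absolute error over typical activation ranges.
if __name__ == "__main__":
    x = torch.linspace(-4.0, 4.0, steps=101)
    diff = (gelu(x) - gelu_accurate(x)).abs().max()
    print("max |exact - tanh approximation| = {:.2e}".format(diff.item()))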
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/gelu.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
LayerDrop as described in https://arxiv.org/abs/1909.11556.
"""
import torch
import torch.nn as nn
class LayerDropModuleList(nn.ModuleList):
"""
A LayerDrop implementation based on :class:`torch.nn.ModuleList`.
We refresh the choice of which layers to drop every time we iterate
over the LayerDropModuleList instance. During evaluation we always
iterate over all layers.
Usage::
        layers = LayerDropModuleList(p=0.5, modules=[layer1, layer2, layer3])
for layer in layers: # this might iterate over layers 1 and 3
x = layer(x)
for layer in layers: # this might iterate over all layers
x = layer(x)
for layer in layers: # this might not iterate over any layers
x = layer(x)
Args:
p (float): probability of dropping out each layer
modules (iterable, optional): an iterable of modules to add
"""
def __init__(self, p, modules=None):
super().__init__(modules)
self.p = p
def __iter__(self):
dropout_probs = torch.empty(len(self)).uniform_()
for i, m in enumerate(super().__iter__()):
if not self.training or (dropout_probs[i] > self.p):
yield m
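# Illustrative usage sketch (not part of the original module). In eval mode every
# layer is visited; in train mode each layer is skipped independently with
# probability p. The layer sizes below are arbitrary toy values.
if __name__ == "__main__":
    layers = LayerDropModuleList(p=0.5, modules=[nn.Linear(8, 8) for _ in range(6)])
    layers.eval()
    assert len(list(layers)) == 6  # evaluation always keeps all layers
    layers.train()
    kept_per_pass = [len(list(layers)) for _ in range(5)]
    print("layers kept per training pass:", kept_per_pass)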
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/layer_drop.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple
import torch
import torch.nn as nn
from fairseq.modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
TransformerSentenceEncoderLayer,
)
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
def init_bert_params(module):
"""
Initialize the weights specific to the BERT Model.
This overrides the default initializations depending on the specified arguments.
    1. If normal_init_linear_weights is set then the weights of the linear
        layer will be initialized using the normal distribution and the
        bias will be set to the specified value.
    2. If normal_init_embed_weights is set then the weights of the embedding
        layer will be initialized using the normal distribution.
    3. If normal_init_proj_weights is set then the weights of the
        in_project_weight for MultiHeadAttention will be initialized using
        the normal distribution (to be validated).
"""
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(
data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
)
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
normal_(module.q_proj.weight.data)
normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
class TransformerSentenceEncoder(nn.Module):
"""
Implementation for a Bi-directional Transformer based Sentence Encoder used
in BERT/XLM style pre-trained models.
This first computes the token embedding using the token embedding matrix,
position embeddings (if specified) and segment embeddings
(if specified). After applying the specified number of
TransformerEncoderLayers, it outputs all the internal states of the
encoder as well as the final representation associated with the first
token (usually CLS token).
Input:
- tokens: B x T matrix representing sentences
- segment_labels: B x T matrix representing segment label for tokens
Output:
- a tuple of the following:
- a list of internal model states used to compute the
predictions where each tensor has shape T x B x C
- sentence representation associated with first input token
in format B x C.
"""
def __init__(
self,
padding_idx: int,
vocab_size: int,
num_encoder_layers: int = 6,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
layerdrop: float = 0.0,
max_seq_len: int = 256,
num_segments: int = 2,
use_position_embeddings: bool = True,
offset_positions_by_padding: bool = True,
encoder_normalize_before: bool = False,
apply_bert_init: bool = False,
activation_fn: str = "relu",
learned_pos_embedding: bool = True,
embed_scale: float = None,
freeze_embeddings: bool = False,
n_trans_layers_to_freeze: int = 0,
export: bool = False,
traceable: bool = False,
q_noise: float = 0.0,
qn_block_size: int = 8,
) -> None:
super().__init__()
self.padding_idx = padding_idx
self.vocab_size = vocab_size
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.layerdrop = layerdrop
self.max_seq_len = max_seq_len
self.embedding_dim = embedding_dim
self.num_segments = num_segments
self.use_position_embeddings = use_position_embeddings
self.apply_bert_init = apply_bert_init
self.learned_pos_embedding = learned_pos_embedding
self.traceable = traceable
self.embed_tokens = self.build_embedding(
self.vocab_size, self.embedding_dim, self.padding_idx
)
self.embed_scale = embed_scale
if q_noise > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(self.embedding_dim, self.embedding_dim, bias=False),
q_noise,
qn_block_size,
)
else:
self.quant_noise = None
self.segment_embeddings = (
nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None)
if self.num_segments > 0
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_seq_len,
self.embedding_dim,
padding_idx=(self.padding_idx if offset_positions_by_padding else None),
learned=self.learned_pos_embedding,
)
if self.use_position_embeddings
else None
)
if encoder_normalize_before:
self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export)
else:
self.emb_layer_norm = None
if self.layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_transformer_sentence_encoder_layer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=self.dropout_module.p,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
for _ in range(num_encoder_layers)
]
)
# Apply initialization of model params after building the model
if self.apply_bert_init:
self.apply(init_bert_params)
def freeze_module_params(m):
if m is not None:
for p in m.parameters():
p.requires_grad = False
if freeze_embeddings:
freeze_module_params(self.embed_tokens)
freeze_module_params(self.segment_embeddings)
freeze_module_params(self.embed_positions)
freeze_module_params(self.emb_layer_norm)
for layer in range(n_trans_layers_to_freeze):
freeze_module_params(self.layers[layer])
def build_embedding(self, vocab_size, embedding_dim, padding_idx):
return nn.Embedding(vocab_size, embedding_dim, padding_idx)
def build_transformer_sentence_encoder_layer(
self,
embedding_dim,
ffn_embedding_dim,
num_attention_heads,
dropout,
attention_dropout,
activation_dropout,
activation_fn,
export,
q_noise,
qn_block_size,
):
return TransformerSentenceEncoderLayer(
embedding_dim=embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
def forward(
self,
tokens: torch.Tensor,
segment_labels: torch.Tensor = None,
last_state_only: bool = False,
positions: Optional[torch.Tensor] = None,
token_embeddings: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
is_tpu = tokens.device.type == "xla"
# compute padding mask. This is needed for multi-head attention
padding_mask = tokens.eq(self.padding_idx)
if not self.traceable and not is_tpu and not padding_mask.any():
padding_mask = None
if token_embeddings is not None:
x = token_embeddings
else:
x = self.embed_tokens(tokens)
if self.embed_scale is not None:
x = x * self.embed_scale
if self.embed_positions is not None:
x = x + self.embed_positions(tokens, positions=positions)
if self.segment_embeddings is not None and segment_labels is not None:
x = x + self.segment_embeddings(segment_labels)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.emb_layer_norm is not None:
x = self.emb_layer_norm(x)
x = self.dropout_module(x)
# account for padding while computing the representation
if padding_mask is not None:
x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
inner_states = []
if not last_state_only:
inner_states.append(x)
for layer in self.layers:
x, _ = layer(x, self_attn_padding_mask=padding_mask, self_attn_mask=attn_mask)
if not last_state_only:
inner_states.append(x)
sentence_rep = x[0, :, :]
if last_state_only:
inner_states = [x]
if self.traceable:
return torch.stack(inner_states), sentence_rep
else:
return inner_states, sentence_rep
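# Illustrative usage sketch (not part of the original module). It builds a small
# encoder with arbitrary toy hyper-parameters (the embedding dimension must be
# divisible by the number of heads) and runs a single forward pass.
if __name__ == "__main__":
    encoder = TransformerSentenceEncoder(
        padding_idx=0,
        vocab_size=100,
        num_encoder_layers=2,
        embedding_dim=64,
        ffn_embedding_dim=128,
        num_attention_heads=4,
        max_seq_len=32,
        apply_bert_init=True,
    )
    tokens = torch.randint(1, 100, (2, 16))  # B x T, no padding tokens
    inner_states, sentence_rep = encoder(tokens)
    # 3 inner states (embedding + 2 layers), each T x B x C, plus a B x C
    # sentence representation taken from the first time step
    print(len(inner_states), inner_states[-1].shape, sentence_rep.shape)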
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/transformer_sentence_encoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .adaptive_input import AdaptiveInput
from .adaptive_softmax import AdaptiveSoftmax
from .base_layer import BaseLayer
from .beamable_mm import BeamableMM
from .character_token_embedder import CharacterTokenEmbedder
from .conv_tbc import ConvTBC
from .cross_entropy import cross_entropy
from .downsampled_multihead_attention import DownsampledMultiHeadAttention
from .dynamic_convolution import DynamicConv, DynamicConv1dTBC
from .dynamic_crf_layer import DynamicCRF
from .fairseq_dropout import FairseqDropout
from .fp32_group_norm import Fp32GroupNorm
from .gelu import gelu, gelu_accurate
from .grad_multiply import GradMultiply
from .gumbel_vector_quantizer import GumbelVectorQuantizer
from .kmeans_vector_quantizer import KmeansVectorQuantizer
from .layer_drop import LayerDropModuleList
from .layer_norm import Fp32LayerNorm, LayerNorm
from .learned_positional_embedding import LearnedPositionalEmbedding
from .lightweight_convolution import LightweightConv, LightweightConv1dTBC
from .linearized_convolution import LinearizedConvolution
from .location_attention import LocationAttention
from .lora import Lora
from .lstm_cell_with_zoneout import LSTMCellWithZoneOut
from .multihead_attention import MultiheadAttention
from .positional_embedding import PositionalEmbedding
from .same_pad import SamePad
from .scalar_bias import ScalarBias
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
from .transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer
from .transformer_sentence_encoder import TransformerSentenceEncoder
from .transpose_last import TransposeLast
from .unfold import unfold1d
from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer, TransformerInterleavedDecoderLayerBase
from .vggblock import VGGBlock
__all__ = [
"AdaptiveInput",
"AdaptiveSoftmax",
"BaseLayer",
"BeamableMM",
"CharacterTokenEmbedder",
"ConvTBC",
"cross_entropy",
"DownsampledMultiHeadAttention",
"DynamicConv1dTBC",
"DynamicConv",
"DynamicCRF",
"FairseqDropout",
"Fp32GroupNorm",
"Fp32LayerNorm",
"gelu",
"gelu_accurate",
"GradMultiply",
"GumbelVectorQuantizer",
"KmeansVectorQuantizer",
"LayerDropModuleList",
"LayerNorm",
"LearnedPositionalEmbedding",
"LightweightConv1dTBC",
"LightweightConv",
"LinearizedConvolution",
"LocationAttention",
"Lora",
"LSTMCellWithZoneOut",
"MultiheadAttention",
"PositionalEmbedding",
"SamePad",
"ScalarBias",
"SinusoidalPositionalEmbedding",
"TransformerSentenceEncoderLayer",
"TransformerSentenceEncoder",
"TransformerDecoderLayer",
"TransformerDecoderLayer",
"TransformerEncoderLayer",
"TransposeLast",
"VGGBlock",
"unfold1d",
]
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from fairseq.modules import TransformerSentenceEncoder
from fairseq.modules.sparse_transformer_sentence_encoder_layer import (
SparseTransformerSentenceEncoderLayer,
)
class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
"""
Sparse implementation of the TransformerSentenceEncoder
- see SparseMultiheadAttention
"""
def __init__(
self,
padding_idx: int,
vocab_size: int,
num_encoder_layers: int = 6,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
max_seq_len: int = 256,
num_segments: int = 2,
use_position_embeddings: bool = True,
offset_positions_by_padding: bool = True,
encoder_normalize_before: bool = False,
apply_bert_init: bool = False,
activation_fn: str = "relu",
learned_pos_embedding: bool = True,
embed_scale: float = None,
freeze_embeddings: bool = False,
n_trans_layers_to_freeze: int = 0,
export: bool = False,
is_bidirectional: bool = True,
stride: int = 32,
expressivity: int = 8,
) -> None:
        # pass arguments by keyword so they stay aligned with the parent
        # signature, which also takes a `layerdrop` argument between
        # activation_dropout and max_seq_len
        super().__init__(
            padding_idx=padding_idx,
            vocab_size=vocab_size,
            num_encoder_layers=num_encoder_layers,
            embedding_dim=embedding_dim,
            ffn_embedding_dim=ffn_embedding_dim,
            num_attention_heads=num_attention_heads,
            dropout=dropout,
            attention_dropout=attention_dropout,
            activation_dropout=activation_dropout,
            max_seq_len=max_seq_len,
            num_segments=num_segments,
            use_position_embeddings=use_position_embeddings,
            offset_positions_by_padding=offset_positions_by_padding,
            encoder_normalize_before=encoder_normalize_before,
            apply_bert_init=apply_bert_init,
            activation_fn=activation_fn,
            learned_pos_embedding=learned_pos_embedding,
            embed_scale=embed_scale,
            freeze_embeddings=freeze_embeddings,
            n_trans_layers_to_freeze=n_trans_layers_to_freeze,
            export=export,
        )
self.layers = nn.ModuleList(
[
SparseTransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
is_bidirectional=is_bidirectional,
stride=stride,
expressivity=expressivity,
)
for _ in range(num_encoder_layers)
]
)
def freeze_module_params(m):
if m is not None:
for p in m.parameters():
p.requires_grad = False
for layer in range(n_trans_layers_to_freeze):
freeze_module_params(self.layers[layer])
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/sparse_transformer_sentence_encoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from .conv_tbc import ConvTBC
from typing import Dict, Optional
from torch import Tensor
@with_incremental_state
class LinearizedConvolution(ConvTBC):
"""An optimized version of nn.Conv1d.
At training time, this module uses ConvTBC, which is an optimized version
of Conv1d. At inference time, it optimizes incremental generation (i.e.,
one time step at a time) by replacing the convolutions with linear layers.
Note that the input order changes from training to inference.
"""
def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
super().__init__(in_channels, out_channels, kernel_size, **kwargs)
self._linearized_weight = None
self.register_backward_hook(self._clear_linearized_weight)
def state_dict(self, destination=None, prefix="", keep_vars=False):
state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars)
# don't store redundant _linearized_weight in checkpoints
if prefix + "_linearized_weight" in state:
del state[prefix + "_linearized_weight"]
return state
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
if prefix + "_linearized_weight" in state_dict:
del state_dict[prefix + "_linearized_weight"]
@torch.jit.export
def forward(self, input, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None):
"""
Args:
incremental_state: Used to buffer signal; if not None, then input is
expected to contain a single frame. If the input order changes
between time steps, call reorder_incremental_state.
Input:
Time x Batch x Channel during training
Batch x Time x Channel during inference
"""
if incremental_state is None:
output = self.conv_tbc(input)
if self.kernel_size[0] > 1 and self.padding[0] > 0:
# remove future timesteps added by padding
output = output[: -self.padding[0], :, :]
return output
# reshape weight
weight = self._get_linearized_weight()
kw = self.kernel_size[0]
bsz = input.size(0) # input: bsz x len x dim
if kw > 1:
input = input.data
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = input.new(bsz, kw, input.size(2)).zero_()
self._set_input_buffer(incremental_state, input_buffer)
else:
# shift buffer
input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone()
# append next input
input_buffer[:, -1, :] = input[:, -1, :]
input = input_buffer
with torch.no_grad():
output = F.linear(input.view(bsz, -1), weight, self.bias)
return output.view(bsz, 1, -1)
@torch.jit.unused
def reorder_incremental_state(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
@torch.jit.unused
def _get_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
@torch.jit.unused
def _set_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_buffer):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
@torch.jit.unused
def _get_linearized_weight(self):
if self._linearized_weight is None:
kw = self.kernel_size[0]
weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous()
assert weight.size() == (self.out_channels, kw, self.in_channels)
return weight.view(self.out_channels, -1)
return self._linearized_weight
@torch.jit.unused
def _clear_linearized_weight(self, *args):
self._linearized_weight = None
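# Illustrative usage sketch (not part of the original module). It shows the two
# input layouts described in the class docstring: time-first (T x B x C) without
# an incremental state, and batch-first single frames (B x 1 x C) when an
# incremental state dict is passed. All sizes are arbitrary toy values.
if __name__ == "__main__":
    conv = LinearizedConvolution(8, 16, kernel_size=3, padding=2)
    full = conv(torch.randn(10, 4, 8))  # training / full-sequence mode
    print(full.shape)  # future steps added by padding are trimmed: (10, 4, 16)
    incremental_state = {}
    step = conv(torch.randn(4, 1, 8), incremental_state)  # one frame at a time
    print(step.shape)  # expected: torch.Size([4, 1, 16])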
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/linearized_convolution.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import LayerNorm, MultiheadAttention, Lora
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor
from fairseq.models.transformer import (
TransformerConfig,
)
class TransformerEncoderLayerBase(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*cfg.encoder.normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, cfg, shared_layer=None, layerid=None):
super().__init__()
self.cfg = cfg
self.embed_dim = cfg.encoder.embed_dim
self.quant_noise = cfg.quant_noise.pq
self.quant_noise_block_size = cfg.quant_noise.pq_block_size
self.self_attn = self.build_self_attention(self.embed_dim, cfg) if shared_layer is None else shared_layer.self_attn
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=self.__class__.__name__
)
self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn)
activation_dropout_p = cfg.activation_dropout
if activation_dropout_p == 0:
# for backwards compatibility with models that use cfg.relu_dropout
activation_dropout_p = cfg.relu_dropout or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = cfg.encoder.normalize_before
self.fc1 = self.build_fc1(
self.embed_dim,
cfg.encoder.ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
) if shared_layer is None else shared_layer.fc1
self.fc2 = self.build_fc2(
cfg.encoder.ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
) if shared_layer is None else shared_layer.fc2
self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)
lora_r = cfg.lora_r
if lora_r > 0 and cfg.lora_r_shape > 0:
assert layerid is not None
ratio = Lora.ratio_r(layerid)
if cfg.lora_r_shape == 2:
ratio = 2.5 - ratio
lora_r = int(lora_r * ratio)
if lora_r > 0:
self.q_lora = Lora(self.embed_dim, lora_r, self.embed_dim)
self.v_lora = Lora(self.embed_dim, lora_r, self.embed_dim)
else:
self.q_lora = None
self.v_lora = None
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(
nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size
)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(
nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size
)
def build_self_attention(self, embed_dim, cfg):
return MultiheadAttention(
embed_dim,
cfg.encoder.attention_heads,
dropout=cfg.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def residual_connection(self, x, residual):
return residual + x
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(
self,
x,
encoder_padding_mask: Optional[Tensor],
attn_mask: Optional[Tensor] = None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(
attn_mask.to(torch.bool),
-1e8 if x.dtype == torch.float32 else -1e4
)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
need_weights=False,
attn_mask=attn_mask,
q_lora=self.q_lora,
v_lora=self.v_lora
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
# backward compatible with the legacy argparse format
class TransformerEncoderLayer(TransformerEncoderLayerBase):
def __init__(self, args):
super().__init__(TransformerConfig.from_namespace(args))
self.args = args
def build_self_attention(self, embed_dim, args):
return super().build_self_attention(
embed_dim, TransformerConfig.from_namespace(args)
)
class TransformerDecoderLayerBase(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*cfg.decoder.normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, cfg, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, shared_layer=None
):
super().__init__()
self.embed_dim = cfg.decoder.embed_dim
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=self.__class__.__name__
)
self.quant_noise = cfg.quant_noise.pq
self.quant_noise_block_size = cfg.quant_noise.pq_block_size
self.cross_self_attention = cfg.cross_self_attention
self.self_attn = self.build_self_attention(
self.embed_dim,
cfg,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
shared_attn=shared_layer.self_attn if shared_layer is not None else None
)
self.attn_ln = LayerNorm(self.embed_dim) if utils.safe_getattr(cfg, 'scale_attn', False) else None
self.nh = self.self_attn.num_heads
self.head_dim = self.self_attn.head_dim
scale_heads = utils.safe_getattr(cfg, 'scale_heads', False)
self.c_attn = nn.Parameter(torch.ones((self.nh,)), requires_grad=True) if scale_heads else None
self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn)
activation_dropout_p = cfg.activation_dropout
if activation_dropout_p == 0:
# for backwards compatibility with models that use cfg.relu_dropout
activation_dropout_p = cfg.relu_dropout or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = cfg.decoder.normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, cfg, shared_attn=shared_layer.encoder_attn if shared_layer is not None else None)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)
self.ffn_layernorm = LayerNorm(cfg.decoder.ffn_embed_dim) if utils.safe_getattr(cfg, 'scale_fc', False) else None
self.w_resid = nn.Parameter(torch.ones(self.embed_dim, ), requires_grad=True) if utils.safe_getattr(cfg, 'scale_resids', False) else None
self.fc1 = self.build_fc1(
self.embed_dim,
cfg.decoder.ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
) if shared_layer is None else shared_layer.fc1
self.fc2 = self.build_fc2(
cfg.decoder.ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
) if shared_layer is None else shared_layer.fc2
self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)
self.need_attn = True
self.onnx_trace = False
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(
self, embed_dim, cfg, add_bias_kv=False, add_zero_attn=False, shared_attn=None
):
return MultiheadAttention(
embed_dim,
cfg.decoder.attention_heads,
dropout=cfg.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not cfg.cross_self_attention,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
shared_attn=shared_attn
)
def build_encoder_attention(self, embed_dim, cfg, shared_attn=None):
return MultiheadAttention(
embed_dim,
cfg.decoder.attention_heads,
kdim=cfg.encoder.embed_dim,
vdim=cfg.encoder.embed_dim,
dropout=cfg.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
shared_attn=shared_attn
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def residual_connection(self, x, residual):
return residual + x
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
if self.c_attn is not None:
tgt_len, bsz = x.size(0), x.size(1)
x = x.view(tgt_len, bsz, self.nh, self.head_dim)
            x = torch.einsum('tbhd,h->tbhd', x, self.c_attn)
x = x.reshape(tgt_len, bsz, self.embed_dim)
if self.attn_ln is not None:
x = self.attn_ln(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
if self.ffn_layernorm is not None:
x = self.ffn_layernorm(x)
x = self.fc2(x)
x = self.dropout_module(x)
if self.w_resid is not None:
residual = torch.mul(self.w_resid, residual)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
class TransformerInterleavedDecoderLayerBase(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*cfg.decoder.normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, cfg, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, encoder_layers=None
):
super().__init__()
self.embed_dim = cfg.decoder.embed_dim
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=self.__class__.__name__
)
self.quant_noise = cfg.quant_noise.pq
self.quant_noise_block_size = cfg.quant_noise.pq_block_size
self.cross_self_attention = cfg.cross_self_attention
self.self_attn = self.build_self_attention(
self.embed_dim,
cfg,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
shared_attn=encoder_layers[0].self_attn
)
self.attn_ln = LayerNorm(self.embed_dim) if utils.safe_getattr(cfg, 'scale_attn', False) else None
self.nh = self.self_attn.num_heads
self.head_dim = self.self_attn.head_dim
scale_heads = utils.safe_getattr(cfg, 'scale_heads', False)
self.c_attn = nn.Parameter(torch.ones((self.nh,)), requires_grad=True) if scale_heads else None
self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn)
activation_dropout_p = cfg.activation_dropout
if activation_dropout_p == 0:
# for backwards compatibility with models that use cfg.relu_dropout
activation_dropout_p = cfg.relu_dropout or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = cfg.decoder.normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, cfg, shared_attn=encoder_layers[1].self_attn)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)
self.ffn_layernorm = LayerNorm(cfg.decoder.ffn_embed_dim) if utils.safe_getattr(cfg, 'scale_fc', False) else None
self.w_resid = nn.Parameter(torch.ones(self.embed_dim, ), requires_grad=True) if utils.safe_getattr(cfg, 'scale_resids', False) else None
self.fc1 = self.build_fc1(
self.embed_dim,
cfg.decoder.ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
cfg.decoder.ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)
self.need_attn = True
self.onnx_trace = False
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(
self, embed_dim, cfg, add_bias_kv=False, add_zero_attn=False, shared_attn=None
):
return MultiheadAttention(
embed_dim,
cfg.decoder.attention_heads,
dropout=cfg.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not cfg.cross_self_attention,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
shared_attn=shared_attn
)
def build_encoder_attention(self, embed_dim, cfg, shared_attn=None):
return MultiheadAttention(
embed_dim,
cfg.decoder.attention_heads,
kdim=cfg.encoder.embed_dim,
vdim=cfg.encoder.embed_dim,
dropout=cfg.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
shared_attn=shared_attn
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def residual_connection(self, x, residual):
return residual + x
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
if self.c_attn is not None:
tgt_len, bsz = x.size(0), x.size(1)
x = x.view(tgt_len, bsz, self.nh, self.head_dim)
            x = torch.einsum('tbhd,h->tbhd', x, self.c_attn)
x = x.reshape(tgt_len, bsz, self.embed_dim)
if self.attn_ln is not None:
x = self.attn_ln(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
# intermediate ffn
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
if self.ffn_layernorm is not None:
x = self.ffn_layernorm(x)
x = self.fc2(x)
x = self.dropout_module(x)
if self.w_resid is not None:
residual = torch.mul(self.w_resid, residual)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
# encoder_attn
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
# final ffn
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
if self.ffn_layernorm is not None:
x = self.ffn_layernorm(x)
x = self.fc2(x)
x = self.dropout_module(x)
if self.w_resid is not None:
residual = torch.mul(self.w_resid, residual)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
# backward compatible with the legacy argparse format
class TransformerInterleavedDecoderLayer(TransformerInterleavedDecoderLayerBase):
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, encoder_layers=None
):
super().__init__(
TransformerConfig.from_namespace(args),
no_encoder_attn=no_encoder_attn,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
encoder_layers=encoder_layers
)
self.args = args
def build_self_attention(
self, embed_dim, args, add_bias_kv=False, add_zero_attn=False, shared_attn=None
):
return super().build_self_attention(
embed_dim,
TransformerConfig.from_namespace(args),
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
shared_attn=shared_attn
)
def build_encoder_attention(self, embed_dim, args, shared_attn=None):
return super().build_encoder_attention(
embed_dim,
TransformerConfig.from_namespace(args),
shared_attn=shared_attn
)
# backward compatible with the legacy argparse format
class TransformerDecoderLayer(TransformerDecoderLayerBase):
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__(
TransformerConfig.from_namespace(args),
no_encoder_attn=no_encoder_attn,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.args = args
def build_self_attention(
self, embed_dim, args, add_bias_kv=False, add_zero_attn=False
):
return super().build_self_attention(
embed_dim,
TransformerConfig.from_namespace(args),
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
def build_encoder_attention(self, embed_dim, args):
return super().build_encoder_attention(
embed_dim,
TransformerConfig.from_namespace(args),
)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/transformer_layer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import LayerNorm, MultiheadAttention
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
export: bool = False,
q_noise: float = 0.0,
qn_block_size: int = 8,
init_fn: Callable = None,
) -> None:
super().__init__()
if init_fn is not None:
init_fn()
# Initialize parameters
self.embedding_dim = embedding_dim
self.num_attention_heads = num_attention_heads
self.attention_dropout = attention_dropout
self.q_noise = q_noise
self.qn_block_size = qn_block_size
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.activation_dropout_module = FairseqDropout(
activation_dropout, module_name=self.__class__.__name__
)
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = self.build_self_attention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
self.fc1 = self.build_fc1(
self.embedding_dim,
ffn_embedding_dim,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
self.fc2 = self.build_fc2(
ffn_embedding_dim,
self.embedding_dim,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(
self,
embed_dim,
num_attention_heads,
dropout,
self_attention,
q_noise,
qn_block_size,
):
return MultiheadAttention(
embed_dim,
num_attention_heads,
dropout=dropout,
self_attention=True,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
def forward(
self,
x: torch.Tensor,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer implementation.
"""
residual = x
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
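# Minimal usage sketch: build one encoder layer and run a dummy batch through it.
# The T x B x C layout and the sizes below are illustrative assumptions.
def _transformer_sentence_encoder_layer_demo():
    layer = TransformerSentenceEncoderLayer(
        embedding_dim=768, ffn_embedding_dim=3072, num_attention_heads=8
    )
    x = torch.randn(16, 2, 768)  # (seq_len, batch, embedding_dim)
    out, attn = layer(x)
    return out.shape  # torch.Size([16, 2, 768]); attn is None since need_weights=False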
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/transformer_sentence_encoder_layer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
def quant_noise(module, p, block_size):
"""
Wraps modules and applies quantization noise to the weights for
subsequent quantization with Iterative Product Quantization as
described in "Training with Quantization Noise for Extreme Model Compression"
Args:
- module: nn.Module
- p: amount of Quantization Noise
- block_size: size of the blocks for subsequent quantization with iPQ
Remarks:
- Module weights must have the right sizes wrt the block size
- Only Linear, Embedding and Conv2d modules are supported for the moment
- For more detail on how to quantize by blocks with convolutional weights,
see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
- We implement the simplest form of noise here as stated in the paper
which consists in randomly dropping blocks
"""
# if no quantization noise, don't register hook
if p <= 0:
return module
# supported modules
assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))
# test whether module.weight has the right sizes wrt block_size
is_conv = module.weight.ndim == 4
# 2D matrix
if not is_conv:
assert (
module.weight.size(1) % block_size == 0
), "Input features must be a multiple of block sizes"
# 4D matrix
else:
# 1x1 convolutions
if module.kernel_size == (1, 1):
assert (
module.in_channels % block_size == 0
), "Input channels must be a multiple of block sizes"
# regular convolutions
else:
k = module.kernel_size[0] * module.kernel_size[1]
assert k % block_size == 0, "Kernel size must be a multiple of block size"
def _forward_pre_hook(mod, input):
# no noise for evaluation
if mod.training:
if not is_conv:
# gather weight and sizes
weight = mod.weight
in_features = weight.size(1)
out_features = weight.size(0)
# split weight matrix into blocks and randomly drop selected blocks
mask = torch.zeros(
in_features // block_size * out_features, device=weight.device
)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
else:
# gather weight and sizes
weight = mod.weight
in_channels = mod.in_channels
out_channels = mod.out_channels
# split weight matrix into blocks and randomly drop selected blocks
if mod.kernel_size == (1, 1):
mask = torch.zeros(
int(in_channels // block_size * out_channels),
device=weight.device,
)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
else:
mask = torch.zeros(
weight.size(0), weight.size(1), device=weight.device
)
mask.bernoulli_(p)
mask = (
mask.unsqueeze(2)
.unsqueeze(3)
.repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
)
# scale weights and apply mask
mask = mask.to(
torch.bool
) # x.bool() is not currently supported in TorchScript
s = 1 / (1 - p)
mod.weight.data = s * weight.masked_fill(mask, 0)
module.register_forward_pre_hook(_forward_pre_hook)
return module
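# Minimal usage sketch, assuming a block size of 8 that divides the input features.
# In training mode the registered pre-forward hook randomly zeroes weight blocks and
# rescales the survivors; in eval mode the weights are left untouched.
def _quant_noise_demo():
    layer = quant_noise(nn.Linear(64, 32), p=0.1, block_size=8)
    layer.train()
    y_train = layer(torch.randn(4, 64))  # forward pass with quantization noise
    layer.eval()
    y_eval = layer(torch.randn(4, 64))   # forward pass without noise
    return y_train.shape, y_eval.shape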
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quant_noise.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch
import torch.nn.functional as F
class LocationAttention(nn.Module):
"""
Attention-Based Models for Speech Recognition
https://arxiv.org/pdf/1506.07503.pdf
:param int encoder_dim: # projection-units of encoder
:param int decoder_dim: # units of decoder
:param int attn_dim: attention dimension
:param int conv_dim: # channels of attention convolution
:param int conv_kernel_size: filter size of attention convolution
"""
def __init__(self, attn_dim, encoder_dim, decoder_dim,
attn_state_kernel_size, conv_dim, conv_kernel_size,
scaling=2.0):
super(LocationAttention, self).__init__()
self.attn_dim = attn_dim
self.decoder_dim = decoder_dim
self.scaling = scaling
self.proj_enc = nn.Linear(encoder_dim, attn_dim)
self.proj_dec = nn.Linear(decoder_dim, attn_dim, bias=False)
self.proj_attn = nn.Linear(conv_dim, attn_dim, bias=False)
self.conv = nn.Conv1d(attn_state_kernel_size, conv_dim,
2 * conv_kernel_size + 1,
padding=conv_kernel_size, bias=False)
self.proj_out = nn.Sequential(nn.Tanh(), nn.Linear(attn_dim, 1))
self.proj_enc_out = None # cache
def clear_cache(self):
self.proj_enc_out = None
def forward(self, encoder_out, encoder_padding_mask, decoder_h, attn_state):
"""
:param torch.Tensor encoder_out: padded encoder hidden state B x T x D
:param torch.Tensor encoder_padding_mask: encoder padding mask
:param torch.Tensor decoder_h: decoder hidden state B x D
        :param torch.Tensor attn_state: previous attention weights B x K x T
:return: attention weighted encoder state (B, D)
:rtype: torch.Tensor
        :return: attention weights (B x T)
:rtype: torch.Tensor
"""
bsz, seq_len, _ = encoder_out.size()
if self.proj_enc_out is None:
self.proj_enc_out = self.proj_enc(encoder_out)
# B x K x T -> B x C x T
attn = self.conv(attn_state)
# B x C x T -> B x T x C -> B x T x D
attn = self.proj_attn(attn.transpose(1, 2))
if decoder_h is None:
decoder_h = encoder_out.new_zeros(bsz, self.decoder_dim)
dec_h = self.proj_dec(decoder_h).view(bsz, 1, self.attn_dim)
out = self.proj_out(attn + self.proj_enc_out + dec_h).squeeze(2)
out.masked_fill_(encoder_padding_mask, -float("inf"))
w = F.softmax(self.scaling * out, dim=1)
c = torch.sum(encoder_out * w.view(bsz, seq_len, 1), dim=1)
return c, w
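# Minimal usage sketch for one decoding step; all shapes are illustrative assumptions
# following the docstring (encoder states B x T x D, previous weights B x K x T).
def _location_attention_demo():
    attn = LocationAttention(attn_dim=128, encoder_dim=256, decoder_dim=512,
                             attn_state_kernel_size=1, conv_dim=10, conv_kernel_size=5)
    enc_out = torch.randn(2, 30, 256)                    # B x T x D_enc
    padding_mask = torch.zeros(2, 30, dtype=torch.bool)  # no padded positions here
    dec_h = torch.randn(2, 512)                          # B x D_dec
    attn_state = torch.zeros(2, 1, 30)                   # previous attention weights
    context, weights = attn(enc_out, padding_mask, dec_h, attn_state)
    return context.shape, weights.shape                  # (2, 256), (2, 30)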
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/location_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Layer norm done in fp32 (for fp16 training)
"""
import torch.nn as nn
import torch.nn.functional as F
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(
input.float(),
self.num_groups,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
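# Minimal usage sketch: the statistics are computed in float32 even for half-precision
# inputs, and the result is cast back to the input dtype.
def _fp32_group_norm_demo():
    import torch
    gn = Fp32GroupNorm(num_groups=4, num_channels=32)
    x = torch.randn(2, 32, 50).half()
    return gn(x).dtype  # torch.float16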
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/fp32_group_norm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from .unfold import unfold1d
def DynamicConv(
input_size,
kernel_size=1,
padding_l=None,
num_heads=1,
weight_dropout=0.0,
weight_softmax=False,
renorm_padding=False,
bias=False,
conv_bias=False,
query_size=None,
in_proj=False,
):
if torch.cuda.is_available():
try:
from fairseq.modules.dynamicconv_layer import DynamicconvLayer
return DynamicconvLayer(
input_size,
kernel_size=kernel_size,
padding_l=padding_l,
num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax,
renorm_padding=renorm_padding,
bias=bias,
conv_bias=conv_bias,
query_size=query_size,
)
except ImportError as e:
print(e)
return DynamicConv1dTBC(
input_size,
kernel_size=kernel_size,
padding_l=padding_l,
num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax,
renorm_padding=renorm_padding,
bias=bias,
conv_bias=conv_bias,
query_size=query_size,
)
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
@with_incremental_state
class DynamicConv1dTBC(nn.Module):
"""Dynamic lightweight convolution taking T x B x C inputs
Args:
input_size: # of channels of the input
        kernel_size: convolution kernel size
padding_l: padding to the left when using "same" padding
num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
weight_dropout: the drop rate of the DropConnect to drop the weight
weight_softmax: normalize the weight with softmax before the convolution
renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1)
bias: use bias
conv_bias: bias of the convolution
query_size: specified when feeding a different input as the query
in_proj: project the input and generate the filter together
Shape:
Input: TxBxC, i.e. (timesteps, batch_size, input_size)
Output: TxBxC, i.e. (timesteps, batch_size, input_size)
Attributes:
weight: the learnable weights of the module of shape
`(num_heads, 1, kernel_size)`
bias: the learnable bias of the module of shape `(input_size)`
"""
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
num_heads=1,
weight_dropout=0.0,
weight_softmax=False,
renorm_padding=False,
bias=False,
conv_bias=False,
query_size=None,
in_proj=False,
):
super().__init__()
self.input_size = input_size
self.query_size = input_size if query_size is None else query_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_dropout_module = FairseqDropout(
weight_dropout, module_name=self.__class__.__name__
)
self.weight_softmax = weight_softmax
self.renorm_padding = renorm_padding
if in_proj:
self.weight_linear = Linear(
self.input_size, self.input_size + num_heads * kernel_size * 1
)
else:
self.weight_linear = Linear(
self.query_size, num_heads * kernel_size * 1, bias=bias
)
if conv_bias:
self.conv_bias = nn.Parameter(torch.Tensor(input_size))
else:
self.conv_bias = None
self.reset_parameters()
@property
def in_proj(self):
return (
self.weight_linear.out_features
== self.input_size + self.num_heads * self.kernel_size
)
def reset_parameters(self):
self.weight_linear.reset_parameters()
if self.conv_bias is not None:
nn.init.constant_(self.conv_bias, 0.0)
def forward(self, x, incremental_state=None, query=None, unfold=None):
"""Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
args:
x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
incremental_state: A dict to keep the state
unfold: unfold the input or not. If not, we use the matrix trick instead
query: use the specified query to predict the conv filters
"""
unfold = (
x.size(0) > 512 if unfold is None else unfold
) # use unfold mode as default for long sequence to save memory
unfold = unfold or (incremental_state is not None)
assert query is None or not self.in_proj
if query is None:
query = x
if unfold:
output = self._forward_unfolded(x, incremental_state, query)
else:
output = self._forward_expanded(x, incremental_state, query)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
def _forward_unfolded(self, x, incremental_state, query):
"""The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right."""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
if self.in_proj:
proj = self.weight_linear(x)
x = proj.narrow(2, 0, self.input_size).contiguous()
weight = (
proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1)
)
else:
weight = self.weight_linear(query).view(T * B * H, -1)
# renorm_padding is only implemented in _forward_expanded
assert not self.renorm_padding or incremental_state is not None
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(
incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
)
x_unfold = x_unfold.view(T * B * H, R, -1)
else:
padding_l = self.padding_l
if K > T and padding_l == K - 1:
weight = weight.narrow(1, K - T, T)
K, padding_l = T, T - 1
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, K, padding_l, 0)
x_unfold = x_unfold.view(T * B * H, R, K)
if self.weight_softmax and not self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = weight.narrow(1, 0, K)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2) :]
K = weight.size(1)
if self.weight_softmax and self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1
output = output.view(T, B, C)
return output
    def _forward_expanded(self, x, incremental_state, query):
"""Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
"""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
if self.in_proj:
proj = self.weight_linear(x)
x = proj.narrow(2, 0, self.input_size).contiguous()
weight = (
proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1)
)
else:
weight = self.weight_linear(query).view(T * B * H, -1)
if not self.renorm_padding:
if self.weight_softmax:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
weight = weight.narrow(1, 0, K).contiguous()
weight = weight.view(T, B * H, K).transpose(0, 1)
x = x.view(T, B * H, R).transpose(0, 1)
if self.weight_softmax and self.renorm_padding:
# turn the convolution filters into band matrices
weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf"))
weight_expanded.as_strided(
(B * H, T, K), (T * (T + K - 1), T + K, 1)
).copy_(weight)
weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
# normalize the weight over valid positions like self-attention
weight_expanded = F.softmax(weight_expanded, dim=2)
weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)
else:
P = self.padding_l
# For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
if K > T and P == K - 1:
weight = weight.narrow(2, K - T, T)
K, P = T, T - 1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False)
weight_expanded.as_strided(
(B * H, T, K), (T * (T + K - 1), T + K, 1)
).copy_(weight)
weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
def extra_repr(self):
s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}".format(
self.input_size,
self.kernel_size,
self.padding_l,
self.num_heads,
self.weight_softmax,
self.conv_bias is not None,
self.renorm_padding,
self.in_proj,
)
if self.query_size != self.input_size:
s += ", query_size={}".format(self.query_size)
if self.weight_dropout_module.p > 0.0:
s += ", weight_dropout={}".format(self.weight_dropout_module.p)
return s
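# Minimal usage sketch; padding_l = kernel_size - 1 is an illustrative assumption for a
# causal, decoder-style setup. Inputs and outputs are T x B x C.
def _dynamic_conv_demo():
    conv = DynamicConv1dTBC(input_size=64, kernel_size=3, padding_l=2, num_heads=4,
                            weight_softmax=True)
    x = torch.randn(20, 2, 64)  # (timesteps, batch, channels)
    y = conv(x)                 # same shape as the input
    return y.shape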
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/dynamic_convolution.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Optional
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class FairseqDropout(nn.Module):
def __init__(self, p, module_name=None):
super().__init__()
self.p = p
self.module_name = module_name
self.apply_during_inference = False
def forward(self, x, inplace: bool = False):
if self.p > 0 and (self.training or self.apply_during_inference):
return F.dropout(x, p=self.p, training=True, inplace=inplace)
else:
return x
def make_generation_fast_(
self,
name: str,
retain_dropout: bool = False,
retain_dropout_modules: Optional[List[str]] = None,
**kwargs
):
if retain_dropout:
if retain_dropout_modules is not None and self.module_name is None:
logger.warning(
"Cannot enable dropout during inference for module {} "
"because module_name was not set".format(name)
)
elif (
retain_dropout_modules is None # if None, apply to all modules
or self.module_name in retain_dropout_modules
):
logger.info(
"Enabling dropout during inference for module: {}".format(name)
)
self.apply_during_inference = True
else:
logger.info("Disabling dropout for module: {}".format(name))
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/fairseq_dropout.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class GumbelVectorQuantizer(nn.Module):
def __init__(
self,
dim,
num_vars,
temp,
groups,
combine_groups,
vq_dim,
time_first,
activation=nn.GELU(),
weight_proj_depth=1,
weight_proj_factor=1,
):
"""Vector quantization using gumbel softmax
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor)
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1
weight_proj_depth: number of layers (with activation in between) to project input before computing logits
weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of
projections by this factor
"""
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.time_first = time_first
assert (
vq_dim % groups == 0
), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim))
nn.init.uniform_(self.vars)
if weight_proj_depth > 1:
def block(input_dim, output_dim):
return nn.Sequential(nn.Linear(input_dim, output_dim), activation)
inner_dim = self.input_dim * weight_proj_factor
self.weight_proj = nn.Sequential(
*[
block(self.input_dim if i == 0 else inner_dim, inner_dim)
for i in range(weight_proj_depth - 1)
],
nn.Linear(inner_dim, groups * num_vars),
)
else:
self.weight_proj = nn.Linear(self.input_dim, groups * num_vars)
nn.init.normal_(self.weight_proj.weight, mean=0, std=1)
nn.init.zeros_(self.weight_proj.bias)
if isinstance(temp, str):
import ast
temp = ast.literal_eval(temp)
assert len(temp) == 3, f"{temp}, {len(temp)}"
self.max_temp, self.min_temp, self.temp_decay = temp
self.curr_temp = self.max_temp
self.codebook_indices = None
def set_num_updates(self, num_updates):
self.curr_temp = max(
self.max_temp * self.temp_decay ** num_updates, self.min_temp
)
def get_codebook_indices(self):
if self.codebook_indices is None:
from itertools import product
p = [range(self.num_vars)] * self.groups
inds = list(product(*p))
self.codebook_indices = torch.tensor(
inds, dtype=torch.long, device=self.vars.device
).flatten()
if not self.combine_groups:
self.codebook_indices = self.codebook_indices.view(
self.num_vars ** self.groups, -1
)
for b in range(1, self.groups):
self.codebook_indices[:, b] += self.num_vars * b
self.codebook_indices = self.codebook_indices.flatten()
return self.codebook_indices
def codebook(self):
indices = self.get_codebook_indices()
return (
self.vars.squeeze(0)
.index_select(0, indices)
.view(self.num_vars ** self.groups, -1)
)
def sample_from_codebook(self, b, n):
indices = self.get_codebook_indices()
indices = indices.view(-1, self.groups)
cb_size = indices.size(0)
assert (
n < cb_size
), f"sample size {n} is greater than size of codebook {cb_size}"
sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,))
indices = indices[sample_idx]
z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1)
return z
def to_codebook_index(self, indices):
res = indices.new_full(indices.shape[:-1], 0)
for i in range(self.groups):
exponent = self.groups - i - 1
res += indices[..., i] * (self.num_vars ** exponent)
return res
def forward_idx(self, x):
res = self.forward(x, produce_targets=True)
return res["x"], res["targets"]
def forward(self, x, produce_targets=False):
result = {"num_vars": self.num_vars * self.groups}
if not self.time_first:
x = x.transpose(1, 2)
bsz, tsz, fsz = x.shape
x = x.reshape(-1, fsz)
x = self.weight_proj(x)
x = x.view(bsz * tsz * self.groups, -1)
_, k = x.max(-1)
hard_x = (
x.new_zeros(*x.shape)
.scatter_(-1, k.view(-1, 1), 1.0)
.view(bsz * tsz, self.groups, -1)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
result["code_perplexity"] = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
).sum()
avg_probs = torch.softmax(
x.view(bsz * tsz, self.groups, -1).float(), dim=-1
).mean(dim=0)
result["prob_perplexity"] = torch.exp(
-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
).sum()
result["temp"] = self.curr_temp
if self.training:
x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x)
else:
x = hard_x
x = x.view(bsz * tsz, -1)
vars = self.vars
if self.combine_groups:
vars = vars.repeat(1, self.groups, 1)
if produce_targets:
result["targets"] = (
x.view(bsz * tsz * self.groups, -1)
.argmax(dim=-1)
.view(bsz, tsz, self.groups)
.detach()
)
x = x.unsqueeze(-1) * vars
x = x.view(bsz * tsz, self.groups, self.num_vars, -1)
x = x.sum(-2)
x = x.view(bsz, tsz, -1)
if not self.time_first:
x = x.transpose(1, 2) # BTC -> BCT
result["x"] = x
return result
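# Minimal usage sketch; the hyper-parameters are illustrative assumptions (roughly in the
# range used by wav2vec 2.0-style models) rather than recommended settings.
def _gumbel_vq_demo():
    vq = GumbelVectorQuantizer(dim=512, num_vars=320, temp=(2.0, 0.5, 0.999995),
                               groups=2, combine_groups=False, vq_dim=256, time_first=True)
    features = torch.randn(4, 100, 512)  # B x T x C
    out = vq(features, produce_targets=True)
    return out["x"].shape, out["targets"].shape  # (4, 100, 256), (4, 100, 2)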
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/gumbel_vector_quantizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from fairseq.modules import Fp32GroupNorm
class KmeansVectorQuantizer(nn.Module):
def __init__(
self, dim, num_vars, groups, combine_groups, vq_dim, time_first, gamma=0.25
):
"""Vector quantization using straight pass-through estimator (i.e. kmeans)
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
gamma: commitment loss coefficient
"""
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.vq_dim = vq_dim
self.time_first = time_first
assert (
vq_dim % groups == 0
), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
self.var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.embedding = nn.Parameter(
0.01 * torch.randn(num_vars, num_groups, self.var_dim)
)
self.projection = nn.Sequential(
nn.Conv1d(dim, dim, kernel_size=1, groups=groups, bias=False),
Fp32GroupNorm(groups, dim),
)
self.gamma = gamma
self.mse_mean = nn.MSELoss(reduction="mean")
def _pass_grad(self, x, y):
"""Manually set gradient for backward pass.
for y = f(x), ensure that during the backward pass,
dL/dy = dL/dx regardless of f(x).
Returns:
y, with the gradient forced to be dL/dy = dL/dx.
"""
return y.detach() + (x - x.detach())
@property
def expand_embedding(self):
if self.combine_groups:
return self.embedding.expand(self.num_vars, self.groups, self.var_dim)
return self.embedding
def forward_idx(self, x):
res = self.forward(x, produce_targets=True)
return res["x"], res["targets"]
def forward(self, x, produce_targets=False):
result = {"num_vars": self.num_vars}
if self.time_first:
x = x.transpose(1, 2)
bsz, fsz, tsz = x.shape
ze = self.projection(x)
ze_ = ze.view(bsz, self.groups, self.var_dim, tsz).permute(0, 3, 1, 2)
d = (
(ze_.unsqueeze(0) - self.expand_embedding.unsqueeze(1).unsqueeze(1))
.view(self.num_vars, bsz, tsz, self.groups, -1)
.norm(dim=-1, p=2)
)
idx = d.argmin(dim=0)
zq = (
torch.stack(
[
self.expand_embedding[idx[..., group], group]
for group in range(self.groups)
],
dim=-2,
)
.view(bsz, tsz, self.groups * self.var_dim)
.permute(0, 2, 1)
)
assert ze.shape == zq.shape, (ze.shape, zq.shape)
x = self._pass_grad(ze, zq)
hard_x = (
idx.new_zeros(bsz * tsz * self.groups, self.num_vars)
.scatter_(-1, idx.view(-1, 1), 1.0)
.view(bsz * tsz, self.groups, -1)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
result["code_perplexity"] = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
).sum()
if produce_targets:
result["targets"] = idx
if self.time_first:
x = x.transpose(1, 2) # BCT -> BTC
result["x"] = x
ze = ze.float()
zq = zq.float()
latent_loss = self.mse_mean(zq, ze.detach())
commitment_loss = self.mse_mean(ze, zq.detach())
result["kmeans_loss"] = latent_loss + self.gamma * commitment_loss
return result
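# Minimal usage sketch; sizes are illustrative assumptions. Returns the straight-through
# quantized features, the per-group codeword indices and the commitment/latent loss.
def _kmeans_vq_demo():
    vq = KmeansVectorQuantizer(dim=256, num_vars=64, groups=2, combine_groups=False,
                               vq_dim=256, time_first=True)
    features = torch.randn(4, 50, 256)  # B x T x C
    out = vq(features, produce_targets=True)
    return out["x"].shape, out["targets"].shape, out["kmeans_loss"].item()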
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/kmeans_vector_quantizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
class LSTMCellWithZoneOut(nn.Module):
"""
Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations
https://arxiv.org/abs/1606.01305
"""
def __init__(self, prob: float, input_size: int, hidden_size: int,
bias: bool = True):
super(LSTMCellWithZoneOut, self).__init__()
self.lstm_cell = nn.LSTMCell(input_size, hidden_size, bias=bias)
self.prob = prob
if prob > 1.0 or prob < 0.0:
raise ValueError("zoneout probability must be in the range from "
"0.0 to 1.0.")
def zoneout(self, h, next_h, prob):
if isinstance(h, tuple):
return tuple(
[self.zoneout(h[i], next_h[i], prob) for i in range(len(h))]
)
if self.training:
mask = h.new_zeros(*h.size()).bernoulli_(prob)
return mask * h + (1 - mask) * next_h
return prob * h + (1 - prob) * next_h
def forward(self, x, h):
return self.zoneout(h, self.lstm_cell(x, h), self.prob)
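# Minimal usage sketch: step the cell over a short sequence, carrying (h, c) forward.
# The 0.1 zoneout probability is an illustrative assumption.
def _zoneout_lstm_demo():
    import torch
    cell = LSTMCellWithZoneOut(prob=0.1, input_size=32, hidden_size=64)
    state = (torch.zeros(8, 64), torch.zeros(8, 64))  # (h_0, c_0)
    for _ in range(5):
        state = cell(torch.randn(8, 32), state)
    return state[0].shape, state[1].shape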
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/lstm_cell_with_zoneout.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from fairseq.modules.quant_noise import quant_noise
from torch import nn
class AdaptiveInput(nn.Module):
def __init__(
self,
vocab_size: int,
padding_idx: int,
initial_dim: int,
factor: float,
output_dim: int,
cutoff: List[int],
q_noise: float = 0,
qn_block_size: int = 8,
):
super().__init__()
if vocab_size > cutoff[-1]:
cutoff = cutoff + [vocab_size]
else:
assert (
vocab_size == cutoff[-1]
), "cannot specify cutoff larger than vocab size"
self.cutoff = cutoff
self.embedding_dim = output_dim
self.padding_idx = padding_idx
self.embeddings = nn.ModuleList()
for i in range(len(self.cutoff)):
prev = self.cutoff[i - 1] if i > 0 else 0
size = self.cutoff[i] - prev
dim = int(initial_dim // (factor ** i))
seq = nn.Sequential(
nn.Embedding(size, dim, self.padding_idx),
quant_noise(
nn.Linear(dim, output_dim, bias=False), q_noise, qn_block_size
),
)
self.embeddings.append(seq)
        self.padding_idx = padding_idx
def init_weights(m):
if isinstance(m, nn.Embedding):
nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
elif hasattr(m, "weight"):
nn.init.xavier_uniform_(m.weight)
self.apply(init_weights)
self.register_buffer("_float_tensor", torch.FloatTensor(1))
def weights_for_band(self, band: int):
return self.embeddings[band][0].weight, self.embeddings[band][1].weight
def forward(self, input: torch.Tensor):
result = self._float_tensor.new(input.shape + (self.embedding_dim,))
for i in range(len(self.cutoff)):
mask = input.lt(self.cutoff[i])
if i > 0:
mask.mul_(input.ge(self.cutoff[i - 1]))
chunk_input = input[mask] - self.cutoff[i - 1]
else:
chunk_input = input[mask]
if mask.any():
result[mask] = self.embeddings[i](chunk_input)
return result
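# Minimal usage sketch; the cutoffs and dimensions are illustrative assumptions. Tokens in
# the first band get full-size embeddings, later bands shrink by `factor`, and every band
# is projected back to output_dim.
def _adaptive_input_demo():
    emb = AdaptiveInput(vocab_size=10000, padding_idx=1, initial_dim=512, factor=4.0,
                        output_dim=512, cutoff=[2000, 6000])
    tokens = torch.randint(0, 10000, (2, 16))
    return emb(tokens).shape  # torch.Size([2, 16, 512])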
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/adaptive_input.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.unfold import unfold1d
def LightweightConv(
input_size,
kernel_size=1,
padding_l=None,
num_heads=1,
weight_dropout=0.0,
weight_softmax=False,
bias=False,
):
if torch.cuda.is_available():
try:
from fairseq.modules.lightconv_layer import LightconvLayer
return LightconvLayer(
input_size,
kernel_size=kernel_size,
padding_l=padding_l,
num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax,
bias=bias,
)
except ImportError as e:
print(e)
return LightweightConv1dTBC(
input_size,
kernel_size=kernel_size,
padding_l=padding_l,
num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax,
bias=bias,
)
class LightweightConv1d(nn.Module):
"""Lightweight Convolution assuming the input is BxCxT
This is just an example that explains LightConv clearer than the TBC version.
We don't use this module in the model.
Args:
input_size: # of channels of the input and output
        kernel_size: convolution kernel size
padding: padding
num_heads: number of heads used. The weight is of shape
`(num_heads, 1, kernel_size)`
weight_softmax: normalize the weight with softmax before the convolution
Shape:
Input: BxCxT, i.e. (batch_size, input_size, timesteps)
Output: BxCxT, i.e. (batch_size, input_size, timesteps)
Attributes:
weight: the learnable weights of the module of shape
`(num_heads, 1, kernel_size)`
bias: the learnable bias of the module of shape `(input_size)`
"""
def __init__(
self,
input_size,
kernel_size=1,
padding=0,
num_heads=1,
weight_softmax=False,
bias=False,
weight_dropout=0.0,
):
super().__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.num_heads = num_heads
self.padding = padding
self.weight_softmax = weight_softmax
self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(input_size))
else:
self.bias = None
self.weight_dropout_module = FairseqDropout(
weight_dropout, module_name=self.__class__.__name__
)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def forward(self, input):
"""
input size: B x C x T
output size: B x C x T
"""
B, C, T = input.size()
H = self.num_heads
weight = self.weight
if self.weight_softmax:
weight = F.softmax(weight, dim=-1)
weight = self.weight_dropout_module(weight)
# Merge every C/H entries into the batch dimension (C = self.input_size)
# B x C x T -> (B * C/H) x H x T
# One can also expand the weight to C x 1 x K by a factor of C/H
# and do not reshape the input instead, which is slow though
input = input.view(-1, H, T)
output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads)
output = output.view(B, C, T)
if self.bias is not None:
output = output + self.bias.view(1, -1, 1)
return output
@with_incremental_state
class LightweightConv1dTBC(nn.Module):
"""Lightweight Convolution assuming the input is TxBxC
Args:
input_size: # of channels of the input
        kernel_size: convolution kernel size
padding_l: padding to the left when using "same" padding
num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
weight_dropout: the drop rate of the DropConnect to drop the weight
weight_softmax: normalize the weight with softmax before the convolution
bias: use bias
Shape:
Input: TxBxC, i.e. (timesteps, batch_size, input_size)
Output: TxBxC, i.e. (timesteps, batch_size, input_size)
Attributes:
weight: the learnable weights of the module of shape
`(num_heads, 1, kernel_size)`
bias: the learnable bias of the module of shape `(input_size)`
"""
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
num_heads=1,
weight_dropout=0.0,
weight_softmax=False,
bias=False,
):
super().__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_dropout_module = FairseqDropout(
weight_dropout, module_name=self.__class__.__name__
)
self.weight_softmax = weight_softmax
self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(input_size))
else:
self.bias = None
self.reset_parameters()
self.onnx_trace = False
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def forward(self, x, incremental_state=None, unfold=False):
"""Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
args:
x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
incremental_state: A dict to keep the state
unfold: unfold the input or not. If not, we use the matrix trick instead
"""
unfold = unfold or (incremental_state is not None)
if unfold:
output = self._forward_unfolded(x, incremental_state)
else:
output = self._forward_expanded(x, incremental_state)
if self.bias is not None:
output = output + self.bias.view(1, 1, -1)
return output
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def _forward_unfolded(self, x, incremental_state):
"""The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right."""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight.view(H, K)
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(
incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
)
x_unfold = x_unfold.view(T * B * H, R, -1)
else:
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0)
x_unfold = x_unfold.view(T * B * H, R, K)
if self.weight_softmax:
weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(
weight
)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2) :]
K = weight.size(1)
weight = (
weight.view(1, H, K).expand(T * B, H, K).contiguous().view(T * B * H, K, 1)
)
weight = self.weight_dropout_module(weight)
output = torch.bmm(x_unfold, weight) # T*B*H x R x 1
output = output.view(T, B, C)
return output
def _forward_expanded(self, x, incremental_state):
"""Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
"""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight.view(H, K)
if self.weight_softmax:
weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(
weight
)
weight = weight.view(1, H, K).expand(T * B, H, K).contiguous()
weight = weight.view(T, B * H, K).transpose(0, 1)
x = x.view(T, B * H, R).transpose(0, 1)
P = self.padding_l
if K > T and P == K - 1:
weight = weight.narrow(2, K - T, T)
K, P = T, T - 1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False)
weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_(
weight
)
weight_expanded = weight_expanded.narrow(2, P, T)
weight_expanded = self.weight_dropout_module(weight_expanded)
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
def extra_repr(self):
s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}".format(
self.input_size,
self.kernel_size,
self.padding_l,
self.num_heads,
self.weight_softmax,
self.bias is not None,
)
if self.weight_dropout_module.p > 0.0:
s += ", weight_dropout={}".format(self.weight_dropout_module.p)
return s
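# Minimal usage sketch; padding_l = kernel_size - 1 is an illustrative assumption for a
# causal, decoder-style setup. Inputs and outputs are T x B x C.
def _lightweight_conv_demo():
    conv = LightweightConv1dTBC(input_size=64, kernel_size=3, padding_l=2, num_heads=4,
                                weight_softmax=True)
    x = torch.randn(20, 2, 64)
    y = conv(x)  # same shape as the input
    return y.shape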
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/lightweight_convolution.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class ScalarBias(torch.autograd.Function):
"""
Adds a vector of scalars, used in self-attention mechanism to allow
the model to optionally attend to this vector instead of the past
"""
@staticmethod
def forward(ctx, input, dim, bias_init):
size = list(input.size())
size[dim] += 1
output = input.new(*size).fill_(bias_init)
output.narrow(dim, 1, size[dim] - 1).copy_(input)
ctx.dim = dim
return output
@staticmethod
def backward(ctx, grad):
return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None
def scalar_bias(input, dim, bias_init=0):
return ScalarBias.apply(input, dim, bias_init)
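# Minimal usage sketch: prepend a constant bias slot along a dimension so that, e.g.,
# attention can place probability mass on "nothing" at position 0.
def _scalar_bias_demo():
    x = torch.randn(2, 4, 8)
    y = scalar_bias(x, dim=1)  # shape (2, 5, 8); y[:, 0, :] is filled with bias_init
    return y.shape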
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/scalar_bias.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from .learned_positional_embedding import LearnedPositionalEmbedding
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
def PositionalEmbedding(
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
learned: bool = False,
):
if learned:
# if padding_idx is specified then offset the embedding ids by
# this index and adjust num_embeddings appropriately
# TODO: The right place for this offset would be inside
# LearnedPositionalEmbedding. Move this there for a cleaner implementation.
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(
embedding_dim,
padding_idx,
init_size=num_embeddings + padding_idx + 1,
)
return m
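# Minimal usage sketch; padding_idx=1 follows the usual fairseq convention and is an
# illustrative assumption here.
def _positional_embedding_demo():
    import torch
    pos = PositionalEmbedding(num_embeddings=512, embedding_dim=256, padding_idx=1,
                              learned=False)
    tokens = torch.randint(4, 100, (2, 20))  # dummy token ids without padding
    return pos(tokens).shape                 # torch.Size([2, 20, 256])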
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/positional_embedding.py |
import torch.nn as nn
from .fairseq_dropout import FairseqDropout
class Lora(nn.Module):
    """Low-rank adapter (LoRA-style): projects the input down to rank ``r`` with
    ``linear_u`` and back up with ``linear_v``, applying dropout after each projection.
    Either projection can be shared across modules via ``shared_u``/``shared_v``."""
    def __init__(self, u_dim, r, v_dim, shared_u=None, shared_v=None, dropout=0.0):
super().__init__()
self.u_dim = u_dim
self.d_dim = r
self.v_dim = v_dim
self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__)
if shared_u is not None:
assert shared_u.weight.size() == (r, u_dim)
self.linear_u = shared_u
else:
self.linear_u = nn.Linear(u_dim, r)
if shared_v is not None:
assert shared_v.weight.size() == (v_dim, r)
self.linear_v = shared_v
else:
self.linear_v = nn.Linear(r, v_dim)
def forward(self, x):
x = self.linear_u(x)
x = self.dropout_module(x)
x = self.linear_v(x)
x = self.dropout_module(x)
return x
@classmethod
def ratio_r(cls, layerid): # only consider 6 layers: 2 1.5 1 0.5 0.5 0.5
if layerid < 4:
return 2 - 0.5 * layerid
else:
return 0.5
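# Minimal usage sketch: a rank-16 adapter over a 512-dim hidden state. Adding the adapter
# output to a (typically frozen) base projection is an illustrative assumption about how
# the module is wired, not something the class does by itself.
def _lora_demo():
    import torch
    adapter = Lora(u_dim=512, r=16, v_dim=512, dropout=0.1)
    base = nn.Linear(512, 512)
    x = torch.randn(2, 10, 512)
    y = base(x) + adapter(x)
    return y.shape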
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/lora.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from .multihead_attention import MultiheadAttention
class SparseMultiheadAttention(MultiheadAttention):
"""Sparse Multi-Headed Attention.
"Generating Long Sequences with Sparse Transformers". Implements
fixed factorized self attention, where l=stride and c=expressivity.
A(1) includes all words in the stride window and A(2) takes a summary of c
words from the end of each stride window.
If is_bidirectional=False, we do not include any words past the current word,
as in the paper.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
stride=32,
expressivity=8,
is_bidirectional=True,
):
super().__init__(
embed_dim,
num_heads,
kdim,
vdim,
dropout,
bias,
add_bias_kv,
add_zero_attn,
self_attention,
encoder_decoder_attention,
)
self.is_bidirectional = is_bidirectional
self.stride = stride
self.expressivity = expressivity
assert self.stride > 0 and self.stride >= self.expressivity
# Used for Ai(2) calculations - beginning of [l-c, l] range
def compute_checkpoint(self, word_index):
if word_index % self.stride == 0 and word_index != 0:
checkpoint_index = word_index - self.expressivity
else:
checkpoint_index = (
math.floor(word_index / self.stride) * self.stride
+ self.stride
- self.expressivity
)
return checkpoint_index
# Computes Ai(2)
def compute_subset_summaries(self, absolute_max):
checkpoint_index = self.compute_checkpoint(0)
subset_two = set()
while checkpoint_index <= absolute_max - 1:
summary = set(
range(
checkpoint_index,
min(checkpoint_index + self.expressivity + 1, absolute_max),
)
)
subset_two = subset_two.union(summary)
checkpoint_index = self.compute_checkpoint(checkpoint_index + self.stride)
return subset_two
# Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf
def compute_fixed_attention_subset(self, word_index, tgt_len):
# +1s account for range function; [min, max) -> [min, max]
if not self.is_bidirectional:
absolute_max = word_index + 1
else:
absolute_max = tgt_len
# Subset 1 - whole window
rounded_index = (
math.floor((word_index + self.stride) / self.stride) * self.stride
)
if word_index % self.stride == 0 and word_index != 0:
subset_one = set(
range(word_index - self.stride, min(absolute_max, word_index + 1))
)
else:
subset_one = set(
range(
max(0, rounded_index - self.stride),
min(absolute_max, rounded_index + 1),
)
)
# Subset 2 - summary per window
# If bidirectional, subset 2 is the same for every index
subset_two = set()
if not self.is_bidirectional:
subset_two = self.compute_subset_summaries(absolute_max)
return subset_one.union(subset_two)
# Compute sparse mask - if bidirectional, can pre-compute and store
def buffered_sparse_mask(self, tensor, tgt_len, src_len):
assert tgt_len > self.stride
sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float("-inf"))
# If bidirectional, subset 2 is the same for every index
subset_summaries = set()
if self.is_bidirectional:
subset_summaries = self.compute_subset_summaries(tgt_len)
for i in range(tgt_len):
fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len)
fixed_attention_subset = fixed_attention_subset.union(subset_summaries)
included_word_indices = torch.LongTensor(list(fixed_attention_subset))
sparse_mask[i].index_fill_(0, included_word_indices, 0)
return sparse_mask.type_as(tensor)
def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len)
sparse_mask = sparse_mask.unsqueeze(0).expand(
bsz * self.num_heads, tgt_len, src_len
)
attn_weights += sparse_mask
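# Minimal sketch: materialize the fixed sparse pattern for a short sequence to inspect
# which source positions each target position may attend to. The tiny stride and
# expressivity values are illustrative assumptions.
def _sparse_mask_demo():
    attn = SparseMultiheadAttention(embed_dim=64, num_heads=4, self_attention=True,
                                    stride=4, expressivity=2, is_bidirectional=False)
    mask = attn.buffered_sparse_mask(torch.zeros(1), tgt_len=8, src_len=8)
    return (mask == 0).int()  # 1 where attention is allowed, 0 where it is masked (-inf)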
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/sparse_multihead_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
transpose last 2 dimensions of the input
"""
import torch.nn as nn
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/transpose_last.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Optional
import torch
import torch.onnx.operators
from fairseq import utils
from torch import Tensor, nn
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx if padding_idx is not None else 0
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size, embedding_dim, padding_idx
)
self.onnx_trace = False
self.register_buffer("_float_tensor", torch.FloatTensor(1))
self.max_positions = int(1e5)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
@staticmethod
def get_embedding(
num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
1
) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
num_embeddings, -1
)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(
self,
input,
incremental_state: Optional[Any] = None,
timestep: Optional[Tensor] = None,
positions: Optional[Any] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
bspair = torch.onnx.operators.shape_as_tensor(input)
bsz, seq_len = bspair[0], bspair[1]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos, self.embedding_dim, self.padding_idx
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
if self.onnx_trace:
return (
self.weights.index_select(index=self.padding_idx + pos, dim=0)
.unsqueeze(1)
.repeat(bsz, 1, 1)
)
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = utils.make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
if self.onnx_trace:
flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
embedding_shape = torch.cat(
(bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
)
embeddings = torch.onnx.operators.reshape_from_tensor_shape(
flat_embeddings, embedding_shape
)
return embeddings
return (
self.weights.index_select(0, positions.view(-1))
.view(bsz, seq_len, -1)
.detach()
)
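# Minimal sketch: the embedding table can be built and inspected directly; row p holds
# the sin/cos features for absolute position p, and the padding_idx row is zeroed.
def _sinusoidal_table_demo():
    table = SinusoidalPositionalEmbedding.get_embedding(
        num_embeddings=10, embedding_dim=8, padding_idx=1
    )
    return table.shape, table[1].abs().sum().item()  # torch.Size([10, 8]), 0.0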
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/sinusoidal_positional_embedding.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
has_fused_layernorm = True
class FusedLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
has_fused_layernorm = False
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
if torch.jit.is_scripting():
export = True
if not export and torch.cuda.is_available() and has_fused_layernorm:
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
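# Minimal usage sketch: Fp32LayerNorm normalizes in float32 and casts back to the input
# dtype, which is the behaviour relied on under fp16 training; the half-precision input
# below is only an illustrative assumption.
def _fp32_layer_norm_demo():
    ln = Fp32LayerNorm(64)
    x = torch.randn(2, 10, 64).half()
    return ln(x).dtype  # torch.float16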
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/layer_norm.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from inspect import isfunction
from operator import mul
from functools import reduce, wraps
from aml.multimodal_video.utils.einops.lib import rearrange, repeat
from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange
from fairseq.modules.local_attention import LocalAttention
# constants
TOKEN_SELF_ATTN_VALUE = -5e4
KMEAN_INIT_ITERS = 10
# helper functions
def exists(val):
return val is not None
def identity(x, *args, **kwargs):
return x
def default(x, d):
if not exists(x):
return d if not isfunction(d) else d()
return x
def cast_tuple(x):
return x if isinstance(x, tuple) else (x,)
def cache_fn(f):
cache = None
@wraps(f)
def cached_fn(*args, **kwargs):
nonlocal cache
if exists(cache):
return cache
cache = f(*args, **kwargs)
return cache
return cached_fn
def to(t):
return {'device': t.device, 'dtype': t.dtype}
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module, type)]
def is_empty(t):
return t.nelement() == 0
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(2, expand_dim(indices, -1, last_dim))
def merge_dims(ind_from, ind_to, tensor):
shape = list(tensor.shape)
arr_slice = slice(ind_from, ind_to + 1)
shape[arr_slice] = [reduce(mul, shape[arr_slice])]
return tensor.reshape(*shape)
def expand_dim(t, dim, k):
t = t.unsqueeze(dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def scatter_mean(src, t, index, dim, eps=1e-5):
numer = src.scatter_add(dim, index, t)
denom = src.scatter_add(dim, index, torch.ones_like(t))
return numer / (denom + eps)
def split_at_index(dim, index, t):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(None, index))
r = (*pre_slices, slice(index, None))
return t[l], t[r]
def reshape_dim(t, dim, split_dims):
shape = list(t.shape)
num_dims = len(shape)
dim = (dim + num_dims) % num_dims
shape[dim:dim+1] = split_dims
return t.reshape(shape)
def ema(old, new, decay):
if not exists(old):
return new
return old * decay + new * (1 - decay)
def ema_inplace(moving_avg, new, decay):
if is_empty(moving_avg):
moving_avg.data.copy_(new)
return
moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))
# helper classes
def map_first_tuple_or_el(x, fn):
if isinstance(x, tuple):
return (fn(x[0]),) + x[1:]
return fn(x)
class Chunk(nn.Module):
def __init__(self, chunks, fn, along_dim=-1):
super().__init__()
self.dim = along_dim
self.chunks = chunks
self.fn = fn
def forward(self, x, **kwargs):
if self.chunks <= 1:
return self.fn(x, **kwargs)
chunks = x.chunk(self.chunks, dim=self.dim)
return torch.cat([self.fn(c, **kwargs) for c in chunks], dim=self.dim)
class PreNorm(nn.ModuleList):
def __init__(self, norm_class, dim, fn):
super().__init__()
self.norm = norm_class(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class ReZero(nn.Module):
def __init__(self, fn):
super().__init__()
self.residual_weight = nn.Parameter(torch.zeros(1))
self.fn = fn
def forward(self, x, **kwargs):
x = self.fn(x, **kwargs)
return map_first_tuple_or_el(x, lambda t: t * self.residual_weight)
class ScaleNorm(nn.Module):
def __init__(self, dim, eps=1e-5):
super().__init__()
self.g = nn.Parameter(torch.ones(1))
self.eps = eps
def forward(self, x):
def norm(t):
n = torch.norm(t, dim=-1, keepdim=True).clamp(min=self.eps)
return t / n * self.g
return map_first_tuple_or_el(x, norm)
class ProjectInOut(nn.Module):
def __init__(self, fn, dim_in, dim_out, project_out=True):
super().__init__()
self.fn = fn
self.project_in = nn.Linear(dim_in, dim_out)
self.project_out = nn.Linear(dim_out, dim_in) if project_out else identity
def forward(self, x, **kwargs):
x = self.project_in(x)
x, loss = self.fn(x, **kwargs)
x = self.project_out(x)
return x, loss
class MatrixMultiply(nn.Module):
def __init__(self, tensor, transpose=False):
super().__init__()
self.tensor = tensor
self.transpose = transpose
def forward(self, x):
tensor = self.tensor
if self.transpose:
tensor = tensor.t()
return x @ tensor
# positional embeddings
class DepthWiseConv1d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size, stride=1, bias=True, causal=False):
super().__init__()
self.padding = ((kernel_size - 1), 0) if causal else (kernel_size // 2, kernel_size // 2)
self.net = nn.Sequential(
nn.Conv1d(dim_in, dim_in, kernel_size=kernel_size, groups=dim_in, stride=stride, bias=bias),
nn.Conv1d(dim_in, dim_out, 1, bias=bias)
)
def forward(self, x):
x = F.pad(x, self.padding, value=0.)
return self.net(x)
class FixedPositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
position = torch.arange(0, max_seq_len, dtype=torch.float)
sinusoid_inp = torch.einsum("i,j->ij", position, inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
self.register_buffer('emb', emb)
def forward(self, x):
return self.emb[None, :x.shape[1], :].to(x)
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j=2)
x1, x2 = x.unbind(dim=-1)
x = torch.stack((-x2, x1), dim=-1)
return rearrange(x, '... d j -> ... (d j)')
def apply_rotary_pos_emb(q, k, sinu_pos):
sinu_pos = rearrange(sinu_pos, '() n (j d) -> n j d', j=2)
sin, cos = sinu_pos.unbind(dim=-2)
sin, cos = map(lambda t: repeat(t, 'b n -> b (n j)', j=2), (sin, cos))
q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))
return q, k
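# Hedged usage sketch (not part of the original file): shows how the two rotary
# helpers above are combined. The shapes are assumptions inferred from
# SelfAttention.forward further below, where q and k are (batch, heads, seq, dim_head)
# and the sinusoidal table comes from FixedPositionalEmbedding(dim_head, max_seq_len).
def _example_rotary_pos_emb_usage():
    b, h, t, dh = 2, 4, 16, 32
    q = torch.randn(b, h, t, dh)
    k = torch.randn(b, h, t, dh)
    pos_emb = FixedPositionalEmbedding(dh, max_seq_len=t)
    sinu_pos = pos_emb(torch.randn(b, t, dh))  # (1, t, dh) sinusoidal table
    q, k = apply_rotary_pos_emb(q, k, sinu_pos)
    return q.shape, k.shape  # both stay (b, h, t, dh)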
# kmeans related function and class
def update_kmeans_on_backwards(module):
module.kmean_modules = find_modules(module, Kmeans)
def hook(_, grad_in, grad_out):
for m in module.kmean_modules:
m.update()
return module.register_backward_hook(hook)
def similarity(x, means):
return torch.einsum('bhld,hcd->bhlc', x, means)
def dists_and_buckets(x, means):
dists = similarity(x, means)
_, buckets = torch.max(dists, dim=-1)
return dists, buckets
def batched_bincount(index, num_classes, dim=-1):
shape = list(index.shape)
shape[dim] = num_classes
out = index.new_zeros(shape)
out.scatter_add_(dim, index, torch.ones_like(index, dtype=index.dtype))
return out
def kmeans_iter(x, means, buckets=None):
b, h, _, d, dtype, num_clusters = *x.shape, x.dtype, means.shape[1]
if not exists(buckets):
_, buckets = dists_and_buckets(x, means)
bins = batched_bincount(buckets, num_clusters).sum(0, keepdim=True)
zero_mask = bins.long() == 0
means_ = buckets.new_zeros(b, h, num_clusters, d, dtype=dtype)
means_.scatter_add_(-2, expand_dim(buckets, -1, d), x)
means_ = F.normalize(means_.sum(0, keepdim=True), dim=-1).type(dtype)
means = torch.where(zero_mask.unsqueeze(-1), means, means_)
means = means.squeeze(0)
return means
def distribution(dists, window_size):
_, topk_indices = dists.topk(k=window_size, dim=-2)
indices = topk_indices.transpose(-2, -1)
return indices.reshape(*indices.size()[:2], -1)
class Kmeans(nn.Module):
def __init__(self, num_heads, head_dim, num_clusters, ema_decay=0.999, commitment=1e-4):
super().__init__()
self.commitment = commitment
self.ema_decay = ema_decay
self.register_buffer('means', torch.randn(num_heads, num_clusters, head_dim))
self.register_buffer('initted', torch.tensor(False))
self.num_new_means = 0
self.new_means = None
@torch.no_grad()
def init(self, x):
if self.initted:
return
_, h, _, d, device, _ = *x.shape, x.device, x.dtype
num_clusters = self.means.shape[1]
means = x.transpose(0, 1).contiguous().view(h, -1, d)
num_samples = means.shape[1]
if num_samples >= num_clusters:
indices = torch.randperm(num_samples, device=device)[:num_clusters]
else:
indices = torch.randint(0, num_samples, (num_clusters,), device=device)
means = means[:, indices]
for _ in range(KMEAN_INIT_ITERS):
means = kmeans_iter(x, means)
self.num_new_means = 0
self.means.data.copy_(means)
self.initted.data.copy_(torch.tensor(True))
@torch.no_grad()
def update(self, new_means=None):
new_means = default(new_means, self.new_means)
assert exists(new_means), 'new kmeans has not been supplied'
ema_inplace(self.means, new_means, self.ema_decay)
del self.new_means
self.new_means = None
self.num_new_means = 0
def forward(self, x, update_means=False):
self.init(x)
b, dtype = x.shape[0], x.dtype
means = self.means.type(dtype)
x = F.normalize(x, 2, dim=-1).type(dtype)
with torch.no_grad():
dists, buckets = dists_and_buckets(x, means)
routed_means = batched_index_select(expand_dim(means, 0, b), buckets)
loss = F.mse_loss(x, routed_means) * self.commitment
if update_means:
with torch.no_grad():
means = kmeans_iter(x, means, buckets)
self.new_means = ema(self.new_means, means, self.num_new_means / (self.num_new_means + 1))
self.num_new_means += 1
return dists, loss
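# Hedged usage sketch (not part of the original file): x is assumed to be
# (batch, heads, seq_len, head_dim), matching how KmeansAttention below calls this
# module. forward() returns per-position cluster similarities plus the auxiliary
# commitment loss; update() folds the freshly computed means into the EMA buffer
# (update_kmeans_on_backwards wires this call to the backward pass during training).
def _example_kmeans_usage():
    kmeans = Kmeans(num_heads=4, head_dim=32, num_clusters=8)
    x = torch.randn(2, 4, 64, 32)
    dists, aux_loss = kmeans(x, update_means=True)  # dists: (2, 4, 64, 8)
    kmeans.update()  # apply the exponential moving average update of the centroids
    return dists.shape, aux_loss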
# kmeans attention class
class KmeansAttention(nn.Module):
def __init__(self, num_clusters, window_size, num_heads, head_dim, causal=False, dropout=0., ema_decay=0.999, commitment=1e-4, context_window_size=None, receives_context=False, num_mem_kv=0, shared_qk=False):
super().__init__()
self.num_heads = num_heads
self.num_clusters = num_clusters
self.head_dim = head_dim
self.window_size = window_size
self.context_window_size = default(context_window_size, window_size)
self.causal = causal
self.shared_qk = shared_qk
self.receives_context = receives_context
self.kmeans = Kmeans(num_heads, head_dim, num_clusters, ema_decay, commitment)
self.dropout = nn.Dropout(dropout)
self.num_mem_kv = max(num_mem_kv, 1 if causal and not shared_qk else 0)
self.mem_key = nn.Parameter(torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim))
self.mem_value = nn.Parameter(torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim))
def forward(self, q, k, v, query_mask=None, key_mask=None, **kwargs):
b, h, t, d, kv_t, wsz, c_wsz, nc, device, dtype = *q.shape, k.shape[2], self.window_size, self.context_window_size, self.num_clusters, q.device, q.dtype
is_reverse = kwargs.pop('_reverse', False)
out = torch.zeros_like(q, dtype=dtype)
update_kmeans = self.training and not is_reverse
key_mask = default(key_mask, query_mask) if not self.receives_context else key_mask
kv_wsz = wsz if not self.receives_context else c_wsz
wsz = min(wsz, t)
kv_wsz = min(kv_wsz, kv_t)
if not self.shared_qk or self.receives_context:
dists, aux_loss = self.kmeans(torch.cat((q, k), dim=2), update_kmeans)
q_dists, k_dists = split_at_index(2, t, dists)
indices = distribution(q_dists, wsz)
kv_indices = distribution(k_dists, kv_wsz)
else:
dists, aux_loss = self.kmeans(q, update_kmeans)
k = F.normalize(k, dim=-1).to(q)
indices = distribution(dists, wsz)
kv_indices = indices
q = batched_index_select(q, indices)
k = batched_index_select(k, kv_indices)
v = batched_index_select(v, kv_indices)
reshape_with_window = lambda x: x.reshape(b, h, nc, -1, d)
q, k, v = map(reshape_with_window, (q, k, v))
m_k, m_v = map(lambda x: expand_dim(x, 0, b).to(q), (self.mem_key, self.mem_value))
k, v = map(lambda x: torch.cat(x, dim=3), ((m_k, k), (m_v, v)))
dots = torch.einsum('bhnid,bhnjd->bhnij', q, k) * (d ** -0.5)
mask_value = max_neg_value(dots)
if exists(query_mask) or exists(key_mask):
query_mask = default(query_mask, lambda: torch.ones((b, t), device=device).bool())
key_mask = default(key_mask, lambda: torch.ones((b, kv_t), device=device).bool())
q_mask = expand_dim(query_mask, 1, h).gather(2, indices)
kv_mask = expand_dim(key_mask, 1, h).gather(2, kv_indices)
q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (q_mask, kv_mask))
mask = q_mask[:, :, :, :, None] * kv_mask[:, :, :, None, :]
mask = F.pad(mask, (self.num_mem_kv, 0), value=1)
dots.masked_fill_(~mask, mask_value)
del mask
if self.causal:
q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices))
mask = q_mask[:, :, :, :, None] >= kv_mask[:, :, :, None, :]
mask = F.pad(mask, (self.num_mem_kv, 0), value=1)
dots.masked_fill_(~mask, mask_value)
del mask
if self.shared_qk:
q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices))
mask = q_mask[:, :, :, :, None] == kv_mask[:, :, :, None, :]
mask = F.pad(mask, (self.num_mem_kv, 0), value=0)
dots.masked_fill_(mask, TOKEN_SELF_ATTN_VALUE)
del mask
dots = dots.softmax(dim=-1)
dots = self.dropout(dots)
bo = torch.einsum('bhcij,bhcjd->bhcid', dots, v)
so = torch.reshape(bo, (b, h, -1, bo.shape[-1])).type(dtype)
out = scatter_mean(out, so, indices.unsqueeze(-1).expand_as(so), -2)
return out, aux_loss
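# Hedged usage sketch (not part of the original file): the dimensions below are
# illustrative assumptions that respect the routing constraint used by SelfAttention
# further down, namely num_clusters * window_size == seq_len so every position is routed.
def _example_kmeans_attention_usage():
    attn = KmeansAttention(num_clusters=4, window_size=16, num_heads=4, head_dim=32)
    b, h, t, d = 2, 4, 64, 32
    q, k, v = (torch.randn(b, h, t, d) for _ in range(3))
    out, aux_loss = attn(q, k, v)  # out: (b, h, t, d); aux_loss: scalar commitment loss
    return out.shape, aux_loss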
# feedforward
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_
class FeedForward(nn.Module):
def __init__(self, dim, mult=4, dropout=0., activation=None, glu=False):
super().__init__()
activation = default(activation, GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
# self attention
class SelfAttention(nn.Module):
def __init__(self, dim, max_seq_len, heads, local_attn_heads, window_size, dim_head=None, local_attn_window_size=None, local_attn_radius_blocks=1, causal=False, attn_dropout=0., dropout=0., kmeans_ema_decay=0.999, commitment_factor=1e-4, receives_context=False, context_window_size=None, rel_pos_emb=True, num_mem_kv=0, shared_qk=False, conv_query_kernel=9):
super().__init__()
assert dim_head or (dim % heads) == 0, 'hidden dimension must be divisible by number of heads'
assert (max_seq_len % window_size) == 0, 'maximum sequence length must be divisible by the target window size'
assert local_attn_heads <= heads, 'number of local attention heads must not exceed the total number of heads'
assert not (receives_context and local_attn_heads > 0), 'local attention cannot be used for self attention with context'
assert not (receives_context and causal), 'contextual attention layer cannot be causal'
local_attn_window_size = default(local_attn_window_size, window_size)
context_window_size = default(context_window_size, window_size)
self.shared_qk = shared_qk
self.receives_context = receives_context
self.heads = heads
self.local_attn_heads = local_attn_heads
self.global_attn_heads = heads - local_attn_heads
self.causal = causal
self.window_size = window_size
dim_head = default(dim_head, dim // heads)
dim_heads = dim_head * heads
self.dim_head = dim_head
num_clusters = max_seq_len // window_size
# local
local_dim_heads = dim_head * self.local_attn_heads
if self.local_attn_heads > 0:
rel_pos_emb_config = (dim_head, local_attn_heads) if rel_pos_emb else None
self.local_attn = LocalAttention(dim=dim_head, window_size=local_attn_window_size, causal=causal, dropout=attn_dropout, rel_pos_emb_config=rel_pos_emb_config, look_backward=local_attn_radius_blocks, look_forward=0 if causal else local_attn_radius_blocks)
self.local_to_qkv = nn.Linear(dim, 3 * local_dim_heads)
# global
global_dim_heads = dim_head * self.global_attn_heads
if self.global_attn_heads > 0:
self.global_attn = KmeansAttention(num_clusters, window_size, self.global_attn_heads, dim_head, causal=causal, dropout=attn_dropout, ema_decay=kmeans_ema_decay, commitment=commitment_factor, receives_context=receives_context, num_mem_kv=num_mem_kv, shared_qk=shared_qk)
self.to_q = nn.Sequential(
Rearrange('b n c -> b c n'),
DepthWiseConv1d(dim, global_dim_heads, conv_query_kernel, causal=causal),
Rearrange('b c n -> b n c')
)
self.to_v = nn.Linear(dim, global_dim_heads, bias=False)
if not self.shared_qk:
self.to_k = nn.Linear(dim, global_dim_heads, bias=False)
# out
self.to_out = nn.Linear(dim_heads, dim, bias=False)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, context=None, key_padding_mask=None, context_mask=None, pos_emb=None, **kwargs):
assert not (self.receives_context and not exists(context)), 'context must be passed if self attention is set to receive context'
input_mask = key_padding_mask
x = query.transpose(0, 1)
b, t, _, h, dh = *x.shape, self.heads, self.dim_head
has_local, has_global = map(lambda x: x > 0, (self.local_attn_heads, self.global_attn_heads))
split_heads = lambda v: reshape_dim(v, -1, (-1, dh)).transpose(1, 2).contiguous()
if has_local:
local_qkv = self.local_to_qkv(x).chunk(3, dim=-1)
lq, lk, lv = map(split_heads, local_qkv)
if has_global:
kv_input = x if not self.receives_context else context
q, v = self.to_q(x), self.to_v(kv_input)
if not self.shared_qk:
k = self.to_k(kv_input)
else:
k = self.to_q(kv_input) if self.receives_context else q
q, k, v = map(split_heads, (q, k, v))
out = []
total_loss = torch.tensor(0., requires_grad=True, **to(x))
if has_local:
local_out = self.local_attn(lq, lk, lv, input_mask=input_mask)
out.append(local_out)
if has_global:
if not self.receives_context and exists(pos_emb):
q, k = apply_rotary_pos_emb(q, k, pos_emb)
global_out, loss = self.global_attn(q, k, v, query_mask=input_mask, key_mask=context_mask)
total_loss = total_loss + loss
out.append(global_out)
out = torch.cat(out, dim=1)
out = out.reshape(b, h, t, -1).transpose(1, 2).reshape(b, t, -1)
out = self.dropout(out.transpose(0, 1))
# out = self.to_out(out)
return out, total_loss
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/kmeans_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch import nn
class SamePad(nn.Module):
def __init__(self, kernel_size, causal=False):
super().__init__()
if causal:
self.remove = kernel_size - 1
else:
self.remove = 1 if kernel_size % 2 == 0 else 0
def forward(self, x):
if self.remove > 0:
x = x[:, :, : -self.remove]
return x
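# Hedged usage sketch (not part of the original file): SamePad is typically placed
# right after a Conv1d that uses padding=kernel_size//2; for even kernel sizes the
# convolution emits one extra frame on the right, which SamePad trims so the output
# length matches the input length.
def _example_same_pad_usage():
    import torch
    conv = nn.Conv1d(8, 8, kernel_size=4, padding=4 // 2)
    trim = SamePad(kernel_size=4)
    x = torch.randn(2, 8, 100)  # (batch, channels, time)
    y = trim(conv(x))  # conv yields 101 frames; SamePad trims back to 100
    return y.shape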
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/same_pad.py |
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def parse_config_yaml(yaml_data):
# Initialize to default options.
quantization_options = {
"n_centroids": {
"Linear": ["in_features", {"*": 256}],
"Embedding": ["embedding_dim", {"*": 256}],
},
"block_sizes": {
"Linear": ["fuzzy_name", {"fc": 8, "attn": 4, "emb": 4}],
"Embedding": ["fuzzy_name", {"emb": 8}],
},
"layers_to_quantize": [
"decoder\\.layers\\.\\d+\\.fc[12]",
"decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01]",
"decoder\\.layers\\.\\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj)",
],
}
if "n_centroids" in yaml_data:
quantization_options["n_centroids"] = {
layer: convert_yaml_to_tuple(layer_data)
for layer, layer_data in yaml_data["n_centroids"].items()
}
if "block_sizes" in yaml_data:
quantization_options["block_sizes"] = {
layer: convert_yaml_to_tuple(layer_data)
for layer, layer_data in yaml_data["block_sizes"].items()
}
if "layers_to_quantize" in yaml_data:
quantization_options["layers_to_quantize"] = yaml_data["layers_to_quantize"]
return quantization_options
def convert_yaml_to_tuple(yaml_dictionary):
"""Converts a yaml dictionary with two keys: `key` and `value` into a two
argument tuple of those values."""
return (yaml_dictionary["key"], yaml_dictionary["value"])
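# Hedged usage sketch (the concrete keys and sizes below are illustrative assumptions,
# not taken from a shipped config): a quantization yaml is first loaded into a plain
# dict whose per-layer entries are {"key": ..., "value": ...} pairs; parse_config_yaml
# converts them into the (attribute_name, {pattern: size}) tuples consumed by the PQ
# utilities.
def _example_parse_config_yaml():
    yaml_data = {
        "n_centroids": {"Linear": {"key": "in_features", "value": {"*": 512}}},
        "block_sizes": {"Linear": {"key": "fuzzy_name", "value": {"fc": 8, "*": 4}}},
        "layers_to_quantize": ["decoder\\.layers\\.\\d+\\.fc[12]"],
    }
    options = parse_config_yaml(yaml_data)
    # options["n_centroids"]["Linear"] == ("in_features", {"*": 512})
    return options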
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/quantization_options.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .utils import SizeTracker, get_param, attrsetter, quantize_model_ # NOQA
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/pq/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .em import EM, EmptyClusterResolveError
class PQ(EM):
"""
Quantizes the layer weights W with the standard Product Quantization
technique. This learns a codebook of codewords or centroids of size
block_size from W. For further reference on using PQ to quantize
neural networks, see "And the Bit Goes Down: Revisiting the Quantization
of Neural Networks", Stock et al., ICLR 2020.
PQ is performed in two steps:
(1) The matrix W (weights of a fully-connected or convolutional layer)
is reshaped to (block_size, -1).
- If W is fully-connected (2D), its columns are split into
blocks of size block_size.
- If W is convolutional (4D), its filters are split along the
spatial dimension.
(2) We apply the standard EM/k-means algorithm to the resulting reshaped matrix.
Args:
- W: weight matrix to quantize of size (in_features x out_features)
- block_size: size of the blocks (subvectors)
- n_centroids: number of centroids
- n_iter: number of k-means iterations
- eps: for cluster reassignment when an empty cluster is found
- max_tentatives for cluster reassignment when an empty cluster is found
- verbose: print information after each iteration
Remarks:
- block_size must be compatible with the shape of W
"""
def __init__(
self,
W,
block_size,
n_centroids=256,
n_iter=20,
eps=1e-6,
max_tentatives=30,
verbose=True,
):
self.block_size = block_size
W_reshaped = self._reshape(W)
super(PQ, self).__init__(
W_reshaped,
n_centroids=n_centroids,
n_iter=n_iter,
eps=eps,
max_tentatives=max_tentatives,
verbose=verbose,
)
def _reshape(self, W):
"""
Reshapes the matrix W as explained in step (1).
"""
# fully connected: by convention the weight has size out_features x in_features
if len(W.size()) == 2:
self.out_features, self.in_features = W.size()
assert (
self.in_features % self.block_size == 0
), "Linear: n_blocks must be a multiple of in_features"
return (
W.reshape(self.out_features, -1, self.block_size)
.permute(2, 1, 0)
.flatten(1, 2)
)
# convolutional: we reshape along the spatial dimension
elif len(W.size()) == 4:
self.out_channels, self.in_channels, self.k_h, self.k_w = W.size()
assert (
self.in_channels * self.k_h * self.k_w
) % self.block_size == 0, (
"Conv2d: n_blocks must be a multiple of in_channels * k_h * k_w"
)
return (
W.reshape(self.out_channels, -1, self.block_size)
.permute(2, 1, 0)
.flatten(1, 2)
)
# not implemented
else:
raise NotImplementedError(W.size())
def encode(self):
"""
Performs self.n_iter EM steps.
"""
self.initialize_centroids()
for i in range(self.n_iter):
try:
self.step(i)
except EmptyClusterResolveError:
break
def decode(self):
"""
Returns the encoded full weight matrix. Must be called after
the encode function.
"""
# fully connected case
if "k_h" not in self.__dict__:
return (
self.centroids[self.assignments]
.reshape(-1, self.out_features, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
# convolutional case
else:
return (
self.centroids[self.assignments]
.reshape(-1, self.out_channels, self.block_size)
.permute(1, 0, 2)
.reshape(self.out_channels, self.in_channels, self.k_h, self.k_w)
)
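# Hedged usage sketch (not part of the original file): quantizes the weight of a
# small fully-connected layer and reports the relative reconstruction error. The
# sizes are illustrative; in practice n_centroids is typically 256.
def _example_pq_usage():
    import torch
    W = torch.randn(16, 64)  # (out_features, in_features)
    quantizer = PQ(W, block_size=8, n_centroids=32, n_iter=5, verbose=False)
    quantizer.encode()  # run EM on the reshaped blocks
    W_hat = quantizer.decode()  # same shape as W
    return (W - W_hat).norm() / W.norm()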
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/pq/pq.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
from collections import Counter
import torch
class EM:
"""
EM algorithm used to quantize the columns of W to minimize
||W - W_hat||^2
Args:
- W: weight matrix of size (in_features x out_features)
- n_iter: number of k-means iterations
- n_centroids: number of centroids (size of codebook)
- eps: for cluster reassignment when an empty cluster is found
- max_tentatives for cluster reassignment when an empty cluster is found
- verbose: print error after each iteration
Remarks:
- If one cluster is empty, the most populated cluster is split into
two clusters
- All the relevant dimensions are specified in the code
"""
def __init__(
self, W, n_centroids=256, n_iter=20, eps=1e-6, max_tentatives=30, verbose=True
):
self.W = W
self.n_centroids = n_centroids
self.n_iter = n_iter
self.eps = eps
self.max_tentatives = max_tentatives
self.verbose = verbose
self.centroids = torch.Tensor()
self.assignments = torch.Tensor()
self.objective = []
def initialize_centroids(self):
"""
Initializes the centroids by sampling random columns from W.
"""
in_features, out_features = self.W.size()
indices = torch.randint(
low=0, high=out_features, size=(self.n_centroids,)
).long()
self.centroids = self.W[:, indices].t() # (n_centroids x in_features)
def step(self, i):
"""
There are two standard steps for each iteration: expectation (E) and
minimization (M). The E-step (assignment) is performed with an exhaustive
search and the M-step (centroid computation) is performed with
the exact solution.
Args:
- i: step number
Remarks:
- The E-step heavily uses PyTorch broadcasting to speed up computations
and reduce the memory overhead
"""
# assignments (E-step)
distances = self.compute_distances() # (n_centroids x out_features)
self.assignments = torch.argmin(distances, dim=0) # (out_features)
n_empty_clusters = self.resolve_empty_clusters()
# centroids (M-step)
for k in range(self.n_centroids):
W_k = self.W[:, self.assignments == k] # (in_features x size_of_cluster_k)
self.centroids[k] = W_k.mean(dim=1) # (in_features)
# book-keeping
obj = (self.centroids[self.assignments].t() - self.W).norm(p=2).item()
self.objective.append(obj)
if self.verbose:
logging.info(
f"Iteration: {i},\t"
f"objective: {obj:.6f},\t"
f"resolved empty clusters: {n_empty_clusters}"
)
def resolve_empty_clusters(self):
"""
If one cluster is empty, the most populated cluster is split into
two clusters by shifting the respective centroids. This is done
iteratively for a fixed number of tentatives.
"""
# empty clusters
counts = Counter(map(lambda x: x.item(), self.assignments))
empty_clusters = set(range(self.n_centroids)) - set(counts.keys())
n_empty_clusters = len(empty_clusters)
tentatives = 0
while len(empty_clusters) > 0:
# given an empty cluster, find most populated cluster and split it into two
k = random.choice(list(empty_clusters))
m = counts.most_common(1)[0][0]
e = torch.randn_like(self.centroids[m]) * self.eps
self.centroids[k] = self.centroids[m].clone()
self.centroids[k] += e
self.centroids[m] -= e
# recompute assignments
distances = self.compute_distances() # (n_centroids x out_features)
self.assignments = torch.argmin(distances, dim=0) # (out_features)
# check for empty clusters
counts = Counter(map(lambda x: x.item(), self.assignments))
empty_clusters = set(range(self.n_centroids)) - set(counts.keys())
# increment tentatives
if tentatives == self.max_tentatives:
logging.info(
f"Could not resolve all empty clusters, {len(empty_clusters)} remaining"
)
raise EmptyClusterResolveError
tentatives += 1
return n_empty_clusters
def compute_distances(self):
"""
For every centroid m, computes
||M - m[None, :]||_2
Remarks:
- We rely on PyTorch's broadcasting to speed up computations
and reduce the memory overhead
- Without chunking, the sizes in the broadcasting are modified as:
(n_centroids x n_samples x out_features) -> (n_centroids x out_features)
- The broadcasting computation is automatically chunked so that
the tensors fit into the memory of the GPU
"""
nb_centroids_chunks = 1
while True:
try:
return torch.cat(
[
(self.W[None, :, :] - centroids_c[:, :, None]).norm(p=2, dim=1)
for centroids_c in self.centroids.chunk(
nb_centroids_chunks, dim=0
)
],
dim=0,
)
except RuntimeError:
nb_centroids_chunks *= 2
def assign(self):
"""
Assigns each column of W to its closest centroid, thus essentially
performing the E-step in train().
Remarks:
- The function must be called after train() or after loading
centroids using self.load(), otherwise it will return empty tensors
"""
distances = self.compute_distances() # (n_centroids x out_features)
self.assignments = torch.argmin(distances, dim=0) # (out_features)
def save(self, path, layer):
"""
Saves centroids and assignments.
Args:
- path: folder used to save centroids and assignments
"""
torch.save(self.centroids, os.path.join(path, "{}_centroids.pth".format(layer)))
torch.save(
self.assignments, os.path.join(path, "{}_assignments.pth".format(layer))
)
torch.save(self.objective, os.path.join(path, "{}_objective.pth".format(layer)))
def load(self, path, layer):
"""
Loads centroids and assignments from a given path
Args:
- path: folder use to load centroids and assignments
"""
self.centroids = torch.load(
os.path.join(path, "{}_centroids.pth".format(layer))
)
self.assignments = torch.load(
os.path.join(path, "{}_assignments.pth".format(layer))
)
self.objective = torch.load(
os.path.join(path, "{}_objective.pth".format(layer))
)
class EmptyClusterResolveError(Exception):
pass
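# Hedged usage sketch (not part of the original file): mirrors what PQ.encode does,
# clustering the columns of a random matrix and reporting the final objective.
def _example_em_usage():
    W = torch.randn(8, 128)  # (in_features, out_features)
    em = EM(W, n_centroids=16, n_iter=10, verbose=False)
    em.initialize_centroids()
    for i in range(em.n_iter):
        try:
            em.step(i)
        except EmptyClusterResolveError:
            break
    return em.objective[-1] if em.objective else None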
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/pq/em.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import re
from operator import attrgetter, itemgetter
import torch
import numpy as np
import torch.distributed as dist
import torch.nn as nn
from .modules import PQConv2d, PQEmbedding, PQLinear
from .pq import PQ
def quantize_model_(
model,
size_tracker,
layers_to_quantize,
block_sizes_config,
n_centroids_config,
step=0,
n_iter=15,
eps=1e-6,
max_tentatives=100,
remove_weights=False,
verbose=True,
state_dict=None,
):
"""
Quantize a model in-place by stages. All the targeted
layers are replaced by their quantized counterpart,
and the model is ready for the finetuning of the
centroids in a standard training loop (no modifications
required). Note that we do not quantize biases.
Args:
- model: a nn.Module
- size_tracker: useful for tracking quantization statistics
- layers_to_quantize: a list containing regexps for
filtering the layers to quantize at each stage according
to their name (as in model.named_parameters())
- block_sizes_config: dict like
{
'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
'Linear': ('in_features', {'*': 8})
}
For instance, all conv2d layers with kernel size 3x3 have
a block size of 9 and all Linear layers are quantized with
a block size of 8, irrespective of their size.
- n_centroids_config: dict like
{
'Conv2d': ('kernel_size', {'*': 256}),
'Linear': ('in_features', {'*': 256})
}
For instance, all conv2d layers are quantized with 256 centroids
- step: the layers to quantize inplace corresponding
to layers_to_quantize[step]
"""
quantized_layers = get_layers(model, layers_to_quantize[step], remove_weights=remove_weights)
for layer in quantized_layers:
# book-keeping
is_master_process = (not dist.is_initialized()) or (
dist.is_initialized() and dist.get_rank() == 0
)
verbose = verbose and is_master_process
# get block size and centroids
module = attrgetter(layer)(model)
block_size = get_param(module, layer, block_sizes_config)
n_centroids = get_param(module, layer, n_centroids_config)
if verbose:
logging.info(
f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids"
)
# quantize layer
weight = module.weight.data.clone()
is_bias = "bias" in [x[0] for x in module.named_parameters()]
bias = module.bias.data.clone() if is_bias else None
quantizer = PQ(
weight,
block_size,
n_centroids=n_centroids,
n_iter=n_iter,
eps=eps,
max_tentatives=max_tentatives,
verbose=verbose,
)
# quantization performed on all GPUs with same seed
quantizer.encode()
centroids = quantizer.centroids.contiguous()
assignments = quantizer.assignments.contiguous()
# If n_iter = 0 and state_dict is provided, then
# we initialize random assignments and centroids to
# random values of the appropriate dimensions
# because the quantized model parameters will
# be overwritten by the state_dict later on.
if n_iter == 0 and state_dict:
# Initialize random centroids of the correct size
centroids = torch.rand(centroids.size())
centroids.cuda()
# Get counts and assignment keys from layer in loaded checkpoint.
counts_key = layer+"."+"counts"
assignment_key = layer+"."+"assignments"
# Get number of different bins to include.
counts = list(state_dict[counts_key].shape)[0]
print(layer)
print(state_dict[counts_key])
print(counts)
# Initialize random assignments of the correct size
# with an appropriate number of bins.
num_assignments = list(state_dict[assignment_key].shape)[0]
num_extra = num_assignments - counts
print(num_assignments)
print(num_extra)
assignments_bins = torch.arange(counts)
assignments_rand = torch.randint(0, counts-1, (num_extra, ))
assignments = torch.cat((assignments_bins, assignments_rand), 0)
# assignments = assignments.type(torch.IntTensor)
assignments.cuda()
print("assignments")
print(assignments)
# broadcast results to make sure weights are up-to-date
if dist.is_initialized():
dist.broadcast(centroids, 0)
dist.broadcast(assignments, 0)
# instantiate the quantized counterpart
if isinstance(module, nn.Linear):
out_features, in_features = map(
lambda k: module.__dict__[k], ["out_features", "in_features"]
)
quantized_module = PQLinear(
centroids, assignments, bias, in_features, out_features
)
elif isinstance(module, nn.Embedding):
num_embeddings, embedding_dim = map(
lambda k: module.__dict__[k], ["num_embeddings", "embedding_dim"]
)
quantized_module = PQEmbedding(
centroids, assignments, num_embeddings, embedding_dim
)
elif isinstance(module, nn.Conv2d):
out_channels, in_channels, kernel_size = map(
lambda k: module.__dict__[k],
["out_channels", "in_channels", "kernel_size"],
)
stride, padding, dilation, groups, padding_mode = map(
lambda k: module.__dict__[k],
["stride", "padding", "dilation", "groups", "padding_mode"],
)
quantized_module = PQConv2d(
centroids,
assignments,
bias,
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
padding_mode=padding_mode,
)
else:
raise ValueError(f"Module {module} not yet supported for quantization")
# replace layer by its quantized counterpart
attrsetter(layer)(model, quantized_module)
# update statistics
size_tracker.update(weight, block_size, n_centroids)
# return name of quantized layers
return quantized_layers
def get_layers(model, filter_regexp, remove_weights=False):
"""
Filters out the layers according to a regexp. Note that
we omit biases.
Args:
- model: a nn.Module
- filter_regexp: a regexp to filter the layers to keep
according to their name in model.named_parameters().
For instance, the regexp:
down_layers\\.[123456]\\.(conv[12]|identity\\.conv)
is keeping blocks down_layers from 1 to 6, and inside
each block is keeping conv1, conv2 and identity.conv.
Remarks:
- We add (module\\.)? at the beginning of the regexp to
account for the possible use of nn.parallel.DataParallel
"""
# get all parameter names
all_layers = map(itemgetter(0), model.named_parameters())
# remove biases
all_layers = filter(lambda x: "bias" not in x, all_layers)
# remove .weight in all other names (or .weight_orig if spectral norm is used)
all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers)
# remove weights indicates whether the weights extension should be removed, in addition to
# weight_orig and weight extension on names
if remove_weights:
all_layers = map(lambda x: x.replace(".weights", ""), all_layers)
all_layers = map(lambda x: x.replace(".weight", ""), all_layers)
# return filtered layers
filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")"
r = re.compile(filter_regexp)
return list(filter(r.match, all_layers))
def get_param(module, layer_name, param_config):
"""
Given a quantization configuration, get the right parameter
for the module to be quantized.
Args:
- module: a nn.Module
- layer_name: the name of the layer
- param_config: a dict like
{
'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
'Linear': ('in_features', {'*': 8})
}
For instance, all conv2d layers with kernel size 3x3 have
a block size of 9 and all Linear layers are quantized with
a block size of 8, irrespective of their size.
Remarks:
- if 'fuzzy_name' is passed as a parameter, layers whose layer_name
include 'fuzzy_name' will be assigned the given parameter.
In the following example, conv.expand layers will have a block
size of 9 while conv.reduce will have a block size of 4 and all
other layers will have a block size of 2.
{
'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}),
'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4})
}
"""
layer_type = module.__class__.__name__
if layer_type not in param_config:
raise KeyError(f"Layer type {layer_type} not in config for layer {module}")
feature, params = param_config[module.__class__.__name__]
if feature != "fuzzy_name":
feature_value = str(getattr(module, feature))
if feature_value not in params:
if "*" in params:
feature_value = "*"
else:
raise KeyError(
f"{feature}={feature_value} not in config for layer {module}"
)
else:
feature_values = [name for name in params if name in layer_name]
if len(feature_values) == 0:
if "*" in params:
feature_value = "*"
else:
raise KeyError(f"name={layer_name} not in config for {module}")
else:
feature_value = feature_values[0]
return params[feature_value]
class SizeTracker(object):
"""
Class to keep track of the compressed network size with iPQ.
Args:
- model: a nn.Module
Remarks:
- The compressed size is the sum of three components
for each layer in the network:
(1) Storing the centroids given by iPQ in fp16
(2) Storing the assignments of the blocks in int8
(3) Storing all non-compressed elements such as biases
- This cost is only valid if we use 256 centroids (then
indexing can indeed be done with int8).
"""
def __init__(self, model):
self.model = model
self.size_non_compressed_model = self.compute_size()
self.size_non_quantized = self.size_non_compressed_model
self.size_index = 0
self.size_centroids = 0
self.n_quantized_layers = 0
def compute_size(self):
"""
Computes the size of the model (in MB).
"""
res = 0
for _, p in self.model.named_parameters():
res += p.numel()
return res * 4 / 1024 / 1024
def update(self, W, block_size, n_centroids):
"""
Updates the running statistics when quantizing a new layer.
"""
# bits per weights
bits_per_weight = np.log2(n_centroids) / block_size
self.n_quantized_layers += 1
# size of indexing the subvectors of size block_size (in MB)
size_index_layer = bits_per_weight * W.numel() / 8 / 1024 / 1024
self.size_index += size_index_layer
# size of the centroids stored in float16 (in MB)
size_centroids_layer = n_centroids * block_size * 2 / 1024 / 1024
self.size_centroids += size_centroids_layer
# size of non-compressed layers, e.g. LayerNorms or biases (in MB)
size_uncompressed_layer = W.numel() * 4 / 1024 / 1024
self.size_non_quantized -= size_uncompressed_layer
def __repr__(self):
size_compressed = (
self.size_index + self.size_centroids + self.size_non_quantized
)
compression_ratio = self.size_non_compressed_model / size_compressed # NOQA
return (
f"Non-compressed model size: {self.size_non_compressed_model:.2f} MB. "
f"After quantizing {self.n_quantized_layers} layers, size "
f"(indexing + centroids + other): {self.size_index:.2f} MB + "
f"{self.size_centroids:.2f} MB + {self.size_non_quantized:.2f} MB = "
f"{size_compressed:.2f} MB, compression ratio: {compression_ratio:.2f}x"
)
def attrsetter(*items):
def resolve_attr(obj, attr):
attrs = attr.split(".")
head = attrs[:-1]
tail = attrs[-1]
for name in head:
obj = getattr(obj, name)
return obj, tail
def g(obj, val):
for attr in items:
resolved_obj, resolved_attr = resolve_attr(obj, attr)
setattr(resolved_obj, resolved_attr, val)
return g
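# Hedged usage sketch (the layer regexps and sizes below are illustrative assumptions):
# quantizes the Linear layers of a toy model in a single stage. In fairseq the three
# config arguments normally come from quantization_options.parse_config_yaml.
def _example_quantize_model():
    model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 16))
    size_tracker = SizeTracker(model)
    quantized = quantize_model_(
        model,
        size_tracker,
        layers_to_quantize=["(0|2)"],  # regexp over parameter names, here the two Linears
        block_sizes_config={"Linear": ("fuzzy_name", {"*": 8})},
        n_centroids_config={"Linear": ("fuzzy_name", {"*": 16})},
        step=0,
        n_iter=5,
        verbose=False,
    )
    return quantized, size_tracker  # repr(size_tracker) summarizes the compression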
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/pq/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class PQLinear(nn.Module):
"""
Quantized counterpart of nn.Linear module. Stores the centroid, the assignments
and the non-quantized biases. The full weight is re-instantiated at each forward
pass.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_features x n_blocks
- bias: the non-quantized bias
Remarks:
- We refer the reader to the official documentation of the nn.Linear module
for the other arguments and the behavior of the module
- Performance tests on GPU show that this implementation is 15% slower than
the non-quantized nn.Linear module for a standard training loop.
"""
def __init__(self, centroids, assignments, bias, in_features, out_features):
super(PQLinear, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.in_features = in_features
self.out_features = out_features
# check compatibility
if self.in_features % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % self.out_features != 0:
raise ValueError("Wrong PQ sizes")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
if bias is not None:
self.bias = nn.Parameter(bias)
else:
self.register_parameter("bias", None)
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.out_features, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
def forward(self, x):
return F.linear(
x,
self.weight,
self.bias,
)
def extra_repr(self):
return f"in_features={self.in_features},\
out_features={self.out_features},\
n_centroids={self.n_centroids},\
block_size={self.block_size},\
bias={self.bias is not None}"
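# Hedged usage sketch (not part of the original file): builds a PQLinear directly
# from random centroids and assignments with illustrative sizes; in practice both
# come from the PQ quantizer (see pq.py) via quantize_model_.
def _example_pq_linear_usage():
    in_features, out_features, block_size, n_centroids = 64, 32, 8, 16
    n_blocks = in_features // block_size * out_features  # 8 blocks per output row, 32 rows
    centroids = torch.randn(n_centroids, block_size)
    assignments = torch.randint(0, n_centroids, (n_blocks,))
    layer = PQLinear(centroids, assignments, bias=None, in_features=in_features, out_features=out_features)
    x = torch.randn(4, in_features)
    return layer(x).shape  # (4, out_features)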
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/pq/modules/qlinear.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class PQConv2d(nn.Module):
"""
Quantized counterpart of nn.Conv2d module. Stores the centroid, the assignments
and the non-quantized biases. The full weight is re-instantiated at each forward
pass and autograd automatically computes the gradients with respect to the
centroids.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_channels x n_blocks
- bias: the non-quantized bias, must be either torch.Tensor or None
Remarks:
- We refer the reader to the official documentation of the nn.Conv2d module
for the other arguments and the behavior of the module.
- Performance tests on GPU show that this implementation is 10% slower than
the non-quantized nn.Conv2d module for a standard training loop.
- During the backward, the gradients are averaged by cluster and not summed.
This explains the hook registered to the centroids.
"""
def __init__(
self,
centroids,
assignments,
bias,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
padding_mode="zeros",
):
super(PQConv2d, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.padding_mode = padding_mode
# check compatibility
if in_channels // groups * np.prod(self.kernel_size) % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % out_channels != 0:
raise ValueError("Wrong PQ sizes")
if in_channels % groups != 0:
raise ValueError("in_channels must be divisible by groups")
if out_channels % groups != 0:
raise ValueError("out_channels must be divisible by groups")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
if bias is not None:
self.bias = nn.Parameter(bias)
else:
self.register_parameter("bias", None)
# register hook for averaging gradients per centroids instead of summing
self.centroids.register_hook(lambda x: x / self.counts[:, None])
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.out_channels, self.block_size)
.permute(1, 0, 2)
.reshape(
self.out_channels, self.in_channels // self.groups, *self.kernel_size
)
)
def forward(self, x):
return F.conv2d(
x,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def extra_repr(self):
s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
if self.padding != (0,) * len(self.padding):
s += ", padding={padding}"
if self.dilation != (1,) * len(self.dilation):
s += ", dilation={dilation}"
if self.groups != 1:
s += ", groups={groups}"
if self.bias is None:
s += ", bias=False"
if self.padding_mode != "zeros":
s += ", padding_mode={padding_mode}"
s += ", n_centroids={n_centroids}, block_size={block_size}"
return s.format(**self.__dict__)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/pq/modules/qconv.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .qconv import PQConv2d # NOQA
from .qemb import PQEmbedding # NOQA
from .qlinear import PQLinear # NOQA
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/pq/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class PQEmbedding(nn.Module):
"""
Quantized counterpart of nn.Embedding module. Stores the centroids and
the assignments. The full weight is re-instantiated at each forward
pass.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_features x n_blocks
- bias: the non-quantized bias
Remarks:
- We refer the reader to the official documentation of the nn.Embedding module
for the other arguments and the behavior of the module
- Performance tests on GPU show that this implementation is 10% slower than
the non-quantized nn.Embedding module for a standard training loop.
"""
def __init__(
self,
centroids,
assignments,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
):
super(PQEmbedding, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert (
padding_idx < self.num_embeddings
), "Padding_idx must be within num_embeddings"
elif padding_idx < 0:
assert (
padding_idx >= -self.num_embeddings
), "Padding_idx must be within num_embeddings"
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
# check compatibility
if self.embedding_dim % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % self.num_embeddings != 0:
raise ValueError("Wrong PQ sizes")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.num_embeddings, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
def forward(self, input):
return F.embedding(
input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
def extra_repr(self):
s = "{num_embeddings}, {embedding_dim}"
if self.padding_idx is not None:
s += ", padding_idx={padding_idx}"
if self.max_norm is not None:
s += ", max_norm={max_norm}"
if self.norm_type != 2:
s += ", norm_type={norm_type}"
if self.scale_grad_by_freq is not False:
s += ", scale_grad_by_freq={scale_grad_by_freq}"
if self.sparse is not False:
s += ", sparse=True"
s += ", n_centroids={n_centroids}, block_size={block_size}"
return s.format(**self.__dict__)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/pq/modules/qemb.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .utils import quantize_model_ # NOQA
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/scalar/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def emulate_int(w, bits, method, scale=None, zero_point=None):
q = globals()[f"emulate_int8_{method}"]
return q(w, scale=scale, zero_point=zero_point, bits=bits)
def quantize(w, scale, zero_point, bits=8):
# In the default behavior, max_val = 255.
max_val = 2 ** bits - 1
return (
torch.clamp(torch.round(w / scale + zero_point), 0, max_val) - zero_point
) * scale
def emulate_int8_histogram(w, scale=None, zero_point=None, bits=8):
if scale is None:
obs = torch.ao.quantization.observer.HistogramObserver()
obs.to(device=w.device)
_ = obs(w.float())
scale, zero_point = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point, bits=bits), scale, zero_point
def emulate_int8_channel(w, scale=None, zero_point=None, bits=8):
if scale is None:
obs = torch.ao.quantization.observer.PerChannelMinMaxObserver(
ch_axis=-1, qscheme=torch.per_channel_symmetric
)
obs.to(device=w.device)
_ = obs(w)
scale, zero_point, ch_axis = obs.get_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point, bits=bits), scale, zero_point
def emulate_int8_tensor(w, scale=None, zero_point=None, bits=8):
if scale is None:
obs = torch.ao.quantization.observer.MinMaxObserver()
obs.to(device=w.device)
_ = obs(w)
scale, zero_point = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point, bits=bits), scale, zero_point
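# Hedged worked example (not part of the original file): shows the fake quantization
# performed by quantize() with a manually chosen scale and zero_point, i.e. rounding
# onto a 2**bits - 1 grid followed by de-quantization. The emulate_int8_* helpers
# above derive scale/zero_point from observers and assume a CUDA device.
def _example_quantize():
    w = torch.tensor([-1.0, -0.5, 0.0, 0.5, 1.0])
    scale = torch.tensor(2.0 / 255)  # spread [-1, 1] over 256 levels
    zero_point = torch.tensor(128.0)
    return quantize(w, scale, zero_point, bits=8)  # values snapped to the nearest level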
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/scalar/ops.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from operator import attrgetter
import torch.distributed as dist
import torch.nn as nn
from ..pq.utils import attrsetter, get_layers
from .modules import ActivationQuantizer, IntConv2d, IntEmbedding, IntLinear
MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d}
def quantize_model_(model, p=0.2, bits=8, update_step=3000, method="histogram", remove_weights=False):
"""
Replaces all modules with their scalar quantized counterpart and
registers hooks to quantize the post-activations of those modules.
Args:
- model: a nn.Module
- p: amount of noise (0 for no noise, 1 to quantize all the weights/activations)
- bits: number of bits
- update_step: update quantization parameters every update_step steps
"""
# quantize all layers
# remove weights indicates whether the weights extension should be removed, in addition to
# weight_orig and weight extension on names
quantized_layers = get_layers(model, "(.*?)", remove_weights=remove_weights)
for layer in quantized_layers:
# book-keeping
is_master_process = (not dist.is_initialized()) or (
dist.is_initialized() and dist.get_rank() == 0
)
# recover module
module = attrgetter(layer)(model)
if is_master_process:
logging.info(
f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}"
)
# quantization params
q_params = {
"p": p,
"update_step": update_step,
"bits": bits,
"method": method,
"counter": 0,
}
# instantiate the quantized counterpart
if isinstance(module, tuple(MAPPING.keys())):
QuantizedModule = MAPPING[module.__class__]
quantized_module = QuantizedModule.__new__(QuantizedModule)
params = module.__dict__
params.update(q_params)
quantized_module.__dict__.update(params)
else:
if is_master_process:
logging.info(f"Module {module} not yet supported for quantization")
continue
# activation quantization
a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method=method)
# replace layer by its quantized counterpart
attrsetter(layer)(model, quantized_module)
# return name of quantized layers
return quantized_layers
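# Hedged usage sketch (not part of the original file): swaps every supported layer of
# a toy model for its Int* counterpart and attaches activation quantizers. Note that
# the Int* modules and the observers in ops.py move scale/zero_point to CUDA, so the
# quantized model is meant to run on a GPU.
def _example_scalar_quantize_model():
    model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))
    quantized_layers = quantize_model_(model, p=0.2, bits=8, update_step=1000)
    return model, quantized_layers  # the Linears are now IntLinear instances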
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/scalar/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..ops import emulate_int
class IntLinear(nn.Module):
"""
Quantized counterpart of the nn.Linear module that applies QuantNoise during training.
Args:
- in_features: input features
- out_features: output features
- bias: bias or not
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick.
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
in_features,
out_features,
bias=True,
p=0,
update_step=3000,
bits=8,
method="histogram",
):
super(IntLinear, self).__init__()
self.in_features = int(in_features)
self.out_features = int(out_features)
self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.chosen_bias = bias
if self.chosen_bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter("bias", None)
self.reset_parameters()
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.chosen_bias:
nn.init.constant_(self.bias, 0.0)
return
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
# update scale and zero_point every update_step iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
weight = (
torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
+ noise.detach()
)
# return output
output = F.linear(input, weight, self.bias)
return output
def extra_repr(self):
return "in_features={}, out_features={}, bias={}, quant_noise={}, bits={}, method={}".format(
self.in_features,
self.out_features,
self.bias is not None,
self.p,
self.bits,
self.method,
)
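# Hedged usage sketch (not part of the original file): the histogram observer used by
# ops.emulate_int moves its parameters to CUDA, so this sketch assumes a GPU.
def _example_int_linear_usage():
    layer = IntLinear(32, 16, p=0.5, bits=8, method="histogram").cuda()
    x = torch.randn(8, 32, device="cuda")
    y = layer(x)  # training mode: QuantNoise on a random subset of the weights
    layer.eval()
    y_q = layer(x)  # evaluation mode: fully quantized weights
    return y.shape, y_q.shape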
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/scalar/modules/qlinear.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from ..ops import emulate_int
class IntConv2d(_ConvNd):
"""
Quantized counterpart of the nn.Conv2d module that applies QuantNoise during training.
Args:
- standard nn.Conv2d parameters
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
p=0,
bits=8,
method="histogram",
update_step=1000,
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(IntConv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
False,
_pair(0),
groups,
bias,
padding_mode,
)
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def _conv_forward(self, input, weight):
if self.padding_mode != "zeros":
return F.conv2d(
F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
weight,
self.bias,
self.stride,
_pair(0),
self.dilation,
self.groups,
)
return F.conv2d(
input,
weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
# update scale and zero_point every update_step iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
weight = (
torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
+ noise.detach()
)
# return output
output = self._conv_forward(input, weight)
return output
def extra_repr(self):
return (
"in_channels={}, out_channels={}, kernel_size={}, stride={}, "
"padding={}, dilation={}, groups={}, bias={}, quant_noise={}, "
"bits={}, method={}".format(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
self.dilation,
self.groups,
self.bias is not None,
self.p,
self.bits,
self.method,
)
)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/scalar/modules/qconv.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .qact import ActivationQuantizer # NOQA
from .qconv import IntConv2d # NOQA
from .qemb import IntEmbedding # NOQA
from .qlinear import IntLinear # NOQA
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/scalar/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..ops import emulate_int
class IntEmbedding(nn.Module):
"""
Quantized counterpart of the nn.Embedding module that applies QuantNoise during training.
Args:
- num_embeddings: number of tokens
- embedding_dim: embedding dimension
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
p=0,
update_step=1000,
bits=8,
method="histogram",
):
super(IntEmbedding, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert (
padding_idx < self.num_embeddings
), "Padding_idx must be within num_embeddings"
elif padding_idx < 0:
assert (
padding_idx >= -self.num_embeddings
), "Padding_idx must be within num_embeddings"
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if _weight is None:
self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim))
self.reset_parameters()
else:
assert list(_weight.shape) == [
num_embeddings,
embedding_dim,
], "Shape of weight does not match num_embeddings and embedding_dim"
self.weight = nn.Parameter(_weight)
self.sparse = sparse
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def reset_parameters(self):
nn.init.normal_(self.weight)
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
# update parameters every 1000 iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
weight = (
torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
+ noise.detach()
)
# return output
output = F.embedding(
input,
weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
return output
def extra_repr(self):
s = "{num_embeddings}, {embedding_dim}"
if self.padding_idx is not None:
s += ", padding_idx={padding_idx}"
if self.max_norm is not None:
s += ", max_norm={max_norm}"
if self.norm_type != 2:
s += ", norm_type={norm_type}"
if self.scale_grad_by_freq is not False:
s += ", scale_grad_by_freq={scale_grad_by_freq}"
if self.sparse is not False:
s += ", sparse=True"
s += "quant_noise={p}, bits={bits}, method={method}"
return s.format(**self.__dict__)
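# Hedged usage sketch (not part of the original file); the sizes and
# hyper-parameters below are illustrative assumptions only:
#
#   emb = IntEmbedding(num_embeddings=1000, embedding_dim=64,
#                      p=0.5, bits=8, method="histogram")
#   tokens = torch.randint(0, 1000, (8, 16))
#   out = emb(tokens)        # training: half of the weights see quant noise
#   emb.eval()
#   out = emb(tokens)        # evaluation: weights are fully quantized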
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/scalar/modules/qemb.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ..ops import emulate_int
class ActivationQuantizer:
"""
Fake scalar quantization of the activations using a forward hook.
Args:
- module: a nn.Module for which we quantize the *post-activations*
- p: proportion of activations to quantize, set by default to 1
- update_step: to recompute quantization parameters
- bits: number of bits for quantization
- method: choose among {"tensor", "histogram", "channel"}
- clamp_threshold: to prevent gradient overflow
Remarks:
- Parameters scale and zero_point are recomputed every update_step
forward passes to reduce the overhead
- For the list of quantization methods and number of bits, see ops.py
- To remove the hook from the module, simply call self.handle.remove()
- At test time, the activations are fully quantized
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- The activations are hard-clamped in [-clamp_threshold, clamp_threshold]
to prevent overflow during the backward pass
"""
def __init__(
self,
module,
p=1,
update_step=1000,
bits=8,
method="histogram",
clamp_threshold=5,
):
self.module = module
self.p = p
self.update_step = update_step
self.counter = 0
self.bits = bits
self.method = method
self.clamp_threshold = clamp_threshold
self.handle = None
self.register_hook()
def register_hook(self):
# forward hook
def quantize_hook(module, x, y):
# update parameters every 1000 iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.module.training else 1
# quantize activations
y_q, self.scale, self.zero_point = emulate_int(
y.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(y)
mask.bernoulli_(1 - p)
noise = (y_q - y).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
return torch.clamp(y, clamp_low.item(), clamp_high.item()) + noise.detach()
# register hook
self.handle = self.module.register_forward_hook(quantize_hook)
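# Hedged usage sketch (not part of the original file); the layer shape and
# hyper-parameters below are illustrative assumptions only:
#
#   layer = torch.nn.Linear(16, 16)
#   quantizer = ActivationQuantizer(layer, p=0.5, bits=8, method="histogram")
#   y = layer(torch.randn(4, 16))   # post-activations now carry quant noise
#   quantizer.handle.remove()       # detach the hook when no longer needed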
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/quantization/scalar/modules/qact.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "lightconv_cuda.cuh"
std::vector<at::Tensor> lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = filters.size(0);
const auto filterSize = filters.size(1);
const auto numFiltersInBlock = numFeatures / numHeads;
const dim3 blocks(minibatch, numFeatures);
auto output = at::zeros_like(input);
auto stream = at::cuda::getCurrentCUDAStream();
"""
sequence_if = """
if (sequenceLength <= {seq}) {{
switch(filterSize) {{
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {pad}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{
lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>
<<<blocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
filters.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
output.data<scalar_t>());
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;
}
break;
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;
}
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
final_return = """
}
return {output};
}
"""
with open("lightconv_cuda_forward.cu", "w") as forward:
forward.write(head)
for seq in seqs:
forward.write(sequence_if.format(seq=seq))
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(con_else)
forward.write(final_else)
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(final_return)
def gen_backward():
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "lightconv_cuda.cuh"
std::vector<at::Tensor> lightconv_cuda_backward(
at::Tensor gradOutput,
int padding_l,
at::Tensor input,
at::Tensor filters) {
// gradWrtInput
const int minibatch = input.size(0);
const int numFeatures = input.size(1);
const int sequenceLength = input.size(2);
const int numHeads = filters.size(0);
const int filterSize = filters.size(1);
const dim3 gradBlocks(minibatch, numFeatures);
const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads);
const dim3 weightGradSecondpassBlocks(numHeads, filterSize);
const int numFiltersInBlock = numFeatures / numHeads;
auto gradInput = at::zeros_like(input);
auto gradFilters = at::zeros_like(filters);
at::DeviceGuard g(input.device());
auto stream = at::cuda::getCurrentCUDAStream();
switch(filterSize) {
"""
sequence_if = """
if (sequenceLength <= {seq}) {{
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {p}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{
lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t>
<<<gradBlocks, {b_size}, 0, stream>>>(
gradOutput.data<scalar_t>(),
filters.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
gradInput.data<scalar_t>());
"""
weight_grad_short = """
at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat));
lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t>
<<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
gradOutput.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
tempSumGradFilters.data<float>()
);
lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t>
<<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(
tempSumGradFilters.data<float>(),
minibatch,
numFiltersInBlock,
gradFilters.data<scalar_t>()
);
}}));
}} else
"""
weight_grad = """
at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat));
lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t>
<<<gradBlocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
gradOutput.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
tempSumGradFilters.data<float>()
);
lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t>
<<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(
tempSumGradFilters.data<float>(),
minibatch,
numFiltersInBlock,
gradFilters.data<scalar_t>()
);
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
"""
breakout = """
break;
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
last_return = """
}
return {gradInput, gradFilters};
}
"""
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
thresh = [32, 32, 64, 128, 256, -1, -1, -1]
max_mem = [-1, -1, -1, -1, -1, 192, 96, 64]
with open("lightconv_cuda_backward.cu", "w") as backward:
backward.write(head)
for (k, t, mem) in zip(kernels, thresh, max_mem):
backward.write(case_k.format(k=k))
for seq in seqs:
if (t == -1 or seq <= t) and (mem == -1 or seq < mem):
backward.write(sequence_if.format(seq=seq))
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=seq, p=p))
backward.write(weight_grad_short.format(k=k, b_size=seq, p=p))
backward.write(bad_padding)
else:
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=32, p=p))
backward.write(weight_grad.format(k=k, b_size=32, p=p))
backward.write(bad_padding)
backward.write(breakout)
break
backward.write(con_else)
backward.write(bad_filter)
backward.write(last_return)
if __name__ == "__main__":
gen_forward()
gen_backward()
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/lightconv_layer/cuda_function_gen.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .lightconv_layer import LightconvLayer # noqa
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/lightconv_layer/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import lightconv_cuda
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from torch import nn
from torch.autograd import Function
class lightconvFunction(Function):
@staticmethod
def forward(ctx, x, weights, padding_l):
ctx.padding_l = padding_l
outputs = lightconv_cuda.forward(x, weights, padding_l)
variables = [x, weights]
ctx.save_for_backward(*variables)
return outputs[0]
@staticmethod
def backward(ctx, grad_output):
outputs = lightconv_cuda.backward(
grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors
)
grad_input, grad_weights = outputs
return grad_input, grad_weights, None
@with_incremental_state
class LightconvLayer(nn.Module):
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
weight_softmax=False,
num_heads=1,
weight_dropout=0.0,
bias=False,
):
super(LightconvLayer, self).__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_softmax = weight_softmax
self.weight_dropout_module = FairseqDropout(
weight_dropout, module_name=self.__class__.__name__
)
self.weight = nn.Parameter(torch.Tensor(num_heads, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(input_size))
else:
self.bias = None
self.reset_parameters()
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
for k, v in state_dict.items():
if k.endswith(prefix + "weight"):
if v.dim() == 3 and v.size(1) == 1:
state_dict[k] = v.squeeze(1)
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def forward(self, x, incremental_state=None):
# during inference time, incremental BMM is faster
if incremental_state is not None:
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(
incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
)
x_unfold = x_unfold.view(T * B * H, R, -1)
weight = self.weight
if self.weight_softmax:
weight = F.softmax(weight.float(), dim=1).type_as(weight)
weight = weight[:, -x_unfold.size(2) :]
K = weight.size(1)
weight = (
weight.view(1, H, K)
.expand(T * B, H, K)
.contiguous()
.view(T * B * H, K, 1)
)
weight = self.weight_dropout_module(weight)
output = torch.bmm(x_unfold, weight) # T*B*H x R x 1
output = output.view(T, B, C)
return output
# during training time, use CUDA kernel
else:
x = x.permute(1, 2, 0).contiguous()
weight = self.weight
if self.weight_softmax:
weight = F.softmax(self.weight, -1)
if self.weight_dropout_module.p:
weight = self.weight_dropout_module(weight)
return lightconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1)
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
def half(self):
return self._apply(lambda t: t.half() if t.is_floating_point() else t)
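# Hedged usage sketch (not part of the original file): inputs follow the
# fairseq (time, batch, channels) layout and the training path requires the
# compiled lightconv_cuda extension plus CUDA tensors; all sizes below are
# illustrative assumptions only:
#
#   layer = LightconvLayer(input_size=512, kernel_size=3, padding_l=2,
#                          num_heads=8, weight_softmax=True).cuda()
#   x = torch.randn(20, 4, 512, device="cuda")   # T x B x C
#   y = layer(x)                                  # same T x B x C shape back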
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/lightconv_layer/lightconv_layer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name="lightconv_layer",
ext_modules=[
CUDAExtension(
"lightconv_cuda",
[
"lightconv_cuda.cpp",
"lightconv_cuda_kernel.cu",
],
),
],
cmdclass={"build_ext": BuildExtension},
)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/lightconv_layer/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
blocks = [32, 64, 128, 256]
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "dynamicconv_cuda.cuh"
std::vector<at::Tensor> dynamicconv_cuda_forward(at::Tensor input, at::Tensor weight, int padding_l) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = weight.size(1);
const auto filterSize = weight.size(2);
const auto numFiltersInBlock = numFeatures / numHeads;
const dim3 blocks(minibatch, numFeatures);
auto output = at::zeros_like(input);
auto stream = at::cuda::getCurrentCUDAStream();
"""
switch = """
switch(filterSize) {
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {pad}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "dynamicconv_forward", ([&] {{
dynamicconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>
<<<blocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
output.data<scalar_t>());
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;
}
break;\n
"""
end = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;
}
return {output};
}
"""
with open("dynamicconv_cuda_forward.cu", "w") as forward:
forward.write(head)
forward.write(switch)
for k in kernels:
b_size = 32
for b in blocks:
if b > k:
b_size = b
break
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=b_size, pad=pad))
forward.write(bad_padding)
forward.write(end)
def gen_backward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
thresh = [512, 512, 512, 512, 512, 380, 256, 256]
min_block = [64, 64, 64, 64, 64, 64, 128, 256]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "dynamicconv_cuda.cuh"
std::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = weight.size(1);
const auto filterSize = weight.size(2);
const auto numFiltersInBlock = numFeatures / numHeads;
auto numChunks = 1;
auto gradInput = at::zeros_like(input);
auto gradWeight = at::zeros_like(weight);
auto stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(minibatch, numHeads, numChunks);
"""
sequence_if = """
if (sequenceLength < {seq}) {{
switch(filterSize) {{
"""
case_k = """
case {k}:
"""
chunks_reset = """
numChunks = int(ceilf(sequenceLength/float({b_size})));
blocks = dim3(minibatch, numHeads, numChunks);
"""
main_block = """
if (padding_l == {p}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {{
dynamicconv_backward_kernel<{k}, {b_size}, {p}, scalar_t>
<<<blocks, {b_size}, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;\n
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
last_return = """
}
return {gradInput, gradWeight};
}
"""
with open("dynamicconv_cuda_backward.cu", "w") as backward:
backward.write(head)
for seq in seqs:
backward.write(sequence_if.format(seq=seq))
for k, t, m in zip(kernels, thresh, min_block):
backward.write(case_k.format(k=k))
if seq <= t:
b_size = seq
else:
b_size = m
backward.write(chunks_reset.format(b_size=b_size))
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=b_size, p=p))
backward.write(bad_padding)
backward.write(bad_filter)
backward.write(con_else)
backward.write(final_else)
for k, m in zip(kernels, min_block):
backward.write(case_k.format(k=k))
backward.write(chunks_reset.format(b_size=m))
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=m, p=p))
backward.write(bad_padding)
backward.write(bad_filter)
backward.write(last_return)
if __name__ == "__main__":
gen_forward()
gen_backward()
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/dynamicconv_layer/cuda_function_gen.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .dynamicconv_layer import DynamicconvLayer # noqa
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/dynamicconv_layer/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dynamicconv_cuda
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.unfold import unfold1d
from torch import nn
from torch.autograd import Function
class dynamicconvFunction(Function):
@staticmethod
def forward(ctx, x, weights, padding_l):
ctx.padding_l = padding_l
outputs = dynamicconv_cuda.forward(x, weights, padding_l)
variables = [x, weights]
ctx.save_for_backward(*variables)
return outputs[0]
@staticmethod
def backward(ctx, grad_output):
outputs = dynamicconv_cuda.backward(
grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors
)
grad_input, grad_weights = outputs
return grad_input, grad_weights, None
@with_incremental_state
class DynamicconvLayer(nn.Module):
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
weight_softmax=False,
num_heads=1,
weight_dropout=0.0,
bias=False,
renorm_padding=False,
conv_bias=False,
query_size=None,
):
super(DynamicconvLayer, self).__init__()
self.input_size = input_size
self.query_size = input_size if query_size is None else query_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_softmax = weight_softmax
self.weight_dropout_module = FairseqDropout(
weight_dropout, module_name=self.__class__.__name__
)
self.renorm_padding = renorm_padding
self.bias = bias
self.weight_linear = nn.Linear(input_size, num_heads * kernel_size, bias)
if conv_bias:
self.conv_bias = nn.Parameter(torch.Tensor(input_size))
else:
self.conv_bias = None
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight_linear.weight)
if self.conv_bias is not None:
nn.init.constant_(self.conv_bias, 0.0)
nn.init.constant_(self.weight_linear.bias, 0.0)
def forward(self, x, incremental_state=None, query=None, unfold=None):
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
# R = C // H
# during inference time, incremental BMM is faster
if incremental_state is not None:
unfold = (
x.size(0) > 512 if unfold is None else unfold
) # use unfold mode as default for long sequence to save memory
unfold = unfold or (incremental_state is not None)
assert query is None
if query is None:
query = x
if unfold:
output = self._forward_unfolded(x, incremental_state, query)
else:
output = self._forward_expanded(x, incremental_state, query)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
# during training time, use CUDA kernel
else:
weight = self.weight_linear(x).view(T, B, H, K)
if self.weight_softmax:
weight = F.softmax(weight, dim=-1)
if self.weight_dropout_module.p:
weight = self.weight_dropout_module(weight)
weight = weight.permute(1, 2, 3, 0).contiguous()
self.filters = weight
x = x.permute(1, 2, 0).contiguous()
output = dynamicconvFunction.apply(x, weight, self.padding_l).permute(
2, 0, 1
)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
def _forward_unfolded(self, x, incremental_state, query):
"""The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right."""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight_linear(query).view(T * B * H, -1)
# renorm_padding is only implemented in _forward_expanded
assert not self.renorm_padding or incremental_state is not None
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(
incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
)
x_unfold = x_unfold.view(T * B * H, R, -1)
else:
padding_l = self.padding_l
if K > T and padding_l == K - 1:
weight = weight.narrow(1, K - T, T)
K, padding_l = T, T - 1
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, K, padding_l, 0)
x_unfold = x_unfold.view(T * B * H, R, K)
if self.weight_softmax and not self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = weight.narrow(1, 0, K)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2) :]
K = weight.size(1)
if self.weight_softmax and self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1
output = output.view(T, B, C)
return output
def _forward_expanded(self, x, incremental_stat, query):
"""Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
"""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight_linear(query).view(T * B * H, -1)
if not self.renorm_padding:
if self.weight_softmax:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
weight = weight.narrow(1, 0, K).contiguous()
weight = weight.view(T, B * H, K).transpose(0, 1)
x = x.view(T, B * H, R).transpose(0, 1)
if self.weight_softmax and self.renorm_padding:
# turn the convolution filters into band matrices
weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf"))
weight_expanded.as_strided(
(B * H, T, K), (T * (T + K - 1), T + K, 1)
).copy_(weight)
weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
# normalize the weight over valid positions like self-attention
weight_expanded = F.softmax(weight_expanded, dim=2)
weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)
else:
P = self.padding_l
# For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
if K > T and P == K - 1:
weight = weight.narrow(2, K - T, T)
K, P = T, T - 1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False)
weight_expanded.as_strided(
(B * H, T, K), (T * (T + K - 1), T + K, 1)
).copy_(weight)
weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
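# Hedged usage sketch (not part of the original file): the layer predicts a
# softmax-normalized kernel per time step from the input itself through
# weight_linear; the training path requires the compiled dynamicconv_cuda
# extension and CUDA tensors. All sizes below are illustrative assumptions:
#
#   layer = DynamicconvLayer(input_size=512, kernel_size=3, padding_l=2,
#                            num_heads=8, weight_softmax=True).cuda()
#   x = torch.randn(20, 4, 512, device="cuda")   # T x B x C
#   y = layer(x)                                  # same T x B x C shape back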
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name="dynamicconv_layer",
ext_modules=[
CUDAExtension(
name="dynamicconv_cuda",
sources=[
"dynamicconv_cuda.cpp",
"dynamicconv_cuda_kernel.cu",
],
),
],
cmdclass={"build_ext": BuildExtension},
)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/modules/dynamicconv_layer/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import BaseWrapperDataset
class OffsetTokensDataset(BaseWrapperDataset):
def __init__(self, dataset, offset):
super().__init__(dataset)
self.offset = offset
def __getitem__(self, idx):
return self.dataset[idx] + self.offset
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/offset_tokens_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import torch
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
def _flatten(dico, prefix=None):
"""Flatten a nested dictionary."""
new_dico = OrderedDict()
if isinstance(dico, dict):
prefix = prefix + "." if prefix is not None else ""
for k, v in dico.items():
if v is None:
continue
new_dico.update(_flatten(v, prefix + k))
elif isinstance(dico, list):
for i, v in enumerate(dico):
new_dico.update(_flatten(v, prefix + ".[" + str(i) + "]"))
else:
new_dico = OrderedDict({prefix: dico})
return new_dico
def _unflatten(dico):
"""Unflatten a flattened dictionary into a nested dictionary."""
new_dico = OrderedDict()
for full_k, v in dico.items():
full_k = full_k.split(".")
node = new_dico
for k in full_k[:-1]:
if k.startswith("[") and k.endswith("]"):
k = int(k[1:-1])
if k not in node:
node[k] = OrderedDict()
node = node[k]
node[full_k[-1]] = v
return new_dico
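# Illustrative example (not part of the original file): _flatten turns a
# nested definition into dotted keys and _unflatten inverts the mapping, e.g.
#   _flatten({"net_input": {"src_tokens": ds1}, "target": ds2})
#   -> OrderedDict([("net_input.src_tokens", ds1), ("target", ds2)])
# where ds1 and ds2 stand for arbitrary dataset objects.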
class NestedDictionaryDataset(FairseqDataset):
def __init__(self, defn, sizes=None):
super().__init__()
self.defn = _flatten(defn)
self.sizes = [sizes] if not isinstance(sizes, (list, tuple)) else sizes
first = None
for v in self.defn.values():
if not isinstance(
v,
(
FairseqDataset,
torch.utils.data.Dataset,
),
):
raise ValueError("Expected Dataset but found: {}".format(v.__class__))
first = first or v
if len(v) > 0:
assert len(v) == len(first), "dataset lengths must match"
self._len = len(first)
def __getitem__(self, index):
return OrderedDict((k, ds[index]) for k, ds in self.defn.items())
def __len__(self):
return self._len
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
if len(samples) == 0:
return {}
sample = OrderedDict()
for k, ds in self.defn.items():
try:
sample[k] = ds.collater([s[k] for s in samples])
except NotImplementedError:
sample[k] = default_collate([s[k] for s in samples])
return _unflatten(sample)
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return max(s[index] for s in self.sizes)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
if len(self.sizes) == 1:
return self.sizes[0][index]
else:
return (s[index] for s in self.sizes)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return any(ds.supports_prefetch for ds in self.defn.values())
def prefetch(self, indices):
"""Prefetch the data required for this epoch."""
for ds in self.defn.values():
if getattr(ds, "supports_prefetch", False):
ds.prefetch(indices)
@property
def can_reuse_epoch_itr_across_epochs(self):
return all(ds.can_reuse_epoch_itr_across_epochs for ds in self.defn.values())
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.defn.values():
ds.set_epoch(epoch)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/nested_dictionary_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import time
from collections import OrderedDict
from typing import Dict, List, Optional
import numpy as np
from fairseq.data import data_utils
from . import FairseqDataset
logger = logging.getLogger(__name__)
class MultiCorpusDataset(FairseqDataset):
"""
Stores multiple instances of FairseqDataset together.
Unless batch_sample=True, requires each instance
to be the same dataset, as the collate method needs to work on batches with
samples from each dataset.
Allows specifying a distribution over the datasets to use. Note that unlike
MultiCorpusSampledDataset, this distribution allows sampling for each item,
rather than on a batch level. Note that datasets with sampling probability
of 0 will be skipped.
Each time ordered_indices() is called, a new sample is generated with
the specified distribution.
Args:
datasets: an OrderedDict of FairseqDataset instances.
distribution: a List containing the probability of getting an utterance from
corresponding dataset
seed: random seed for sampling the datasets
sort_indices: if true, will sort the ordered indices by size
batch_sample: if true, will ensure each batch is from a single dataset
"""
def __init__(
self,
datasets: Dict[str, FairseqDataset],
distribution: List[float],
seed: int,
sort_indices: bool = False,
batch_sample: bool = False,
distributed_rank: Optional[int] = None,
):
super().__init__()
assert isinstance(datasets, OrderedDict)
assert len(datasets) == len(distribution)
assert sum(distribution) == 1
self.datasets = datasets
self.distribution = distribution
self.seed = seed
self.sort_indices = sort_indices
self.batch_sample = batch_sample
self.distributed_rank = distributed_rank
# Avoid repeated conversions to list later
self.dataset_list = list(datasets.values())
self.total_num_instances = 0
first_dataset = self.dataset_list[0]
self.num_instances_per_dataset = []
self.dataset_offsets = []
for i, dataset in enumerate(self.dataset_list):
assert isinstance(dataset, FairseqDataset)
assert type(dataset) is type(first_dataset)
self.num_instances_per_dataset.append(
0 if self.distribution[i] == 0 else len(dataset)
)
self.dataset_offsets.append(self.total_num_instances)
self.total_num_instances += self.num_instances_per_dataset[i]
def ordered_indices(self):
start = time.time()
with data_utils.numpy_seed(self.seed, self.epoch):
logger.info(f"sampling new dataset with seed {self.seed} epoch {self.epoch}")
sampled_indices = []
num_selected_instances = 0
# For each dataset i, sample self.distribution[i] * self.total_num_instances
for i, key in enumerate(self.datasets):
if self.distribution[i] == 0:
# skip dataset if sampling probability is 0
continue
if i < len(self.datasets) - 1:
num_instances = int(self.distribution[i] * self.total_num_instances)
high = self.dataset_offsets[i + 1]
else:
num_instances = self.total_num_instances - num_selected_instances
high = self.total_num_instances
logger.info(f"sampling {num_instances} from {key} dataset")
num_selected_instances += num_instances
# First, add k copies of the dataset where k = num_instances // len(dataset).
# This ensures an equal distribution of the data points as much as possible.
# For the remaining entries randomly sample them
dataset_size = len(self.datasets[key])
num_copies = num_instances // dataset_size
dataset_indices = (
np.random.permutation(high - self.dataset_offsets[i])
+ self.dataset_offsets[i]
)[: num_instances - num_copies * dataset_size]
if num_copies > 0:
sampled_indices += list(
np.concatenate(
(
np.repeat(
np.arange(self.dataset_offsets[i], high), num_copies
),
dataset_indices,
)
)
)
else:
sampled_indices += list(dataset_indices)
assert (
len(sampled_indices) == self.total_num_instances
), f"{len(sampled_indices)} vs {self.total_num_instances}"
np.random.shuffle(sampled_indices)
if self.sort_indices:
sampled_indices.sort(key=lambda i: self.num_tokens(i))
logger.info(
"multi_corpus_dataset ordered_indices took {}s".format(
time.time() - start
)
)
return np.array(sampled_indices, dtype=np.int64)
def _map_index(self, index: int):
"""
If dataset A has length N and dataset B has length M
then index 1 maps to index 1 of dataset A, and index N + 1
maps to index 1 of B.
"""
counter = 0
for num_instances, key in zip(self.num_instances_per_dataset, self.datasets):
if index < counter + num_instances:
return index - counter, key
counter += num_instances
raise ValueError(
"Invalid index: {}, max: {}".format(index, self.total_num_instances)
)
def __len__(self):
"""
Length of this dataset is the sum of individual datasets
"""
return self.total_num_instances
def __getitem__(self, index):
new_index, key = self._map_index(index)
try:
item = self.datasets[key][new_index]
item["full_id"] = index
return item
except Exception as e:
e.args = (f"Error from {key} dataset", *e.args)
raise
def collater(self, samples):
"""
If we are doing batch sampling, then pick the right collater to use.
Otherwise we assume all collaters are the same.
"""
if len(samples) == 0:
return None
if "full_id" in samples[0]:
_, key = self._map_index(samples[0]["full_id"])
try:
batch = self.datasets[key].collater(samples)
except Exception:
print(f"Collating failed for key {key}", flush=True)
raise
return batch
else:
# Subclasses may override __getitem__ to not specify full_id
return list(self.datasets.values())[0].collater(samples)
def num_tokens(self, index: int):
index, key = self._map_index(index)
return self.datasets[key].num_tokens(index)
def size(self, index: int):
index, key = self._map_index(index)
return self.datasets[key].size(index)
@property
def can_reuse_epoch_itr_across_epochs(self):
return False
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
logger.info(f"setting epoch of multi_corpus_dataset to {epoch}")
self.epoch = epoch
@property
def supports_prefetch(self):
return False
@property
def supports_fetch_outside_dataloader(self):
return all(
self.datasets[key].supports_fetch_outside_dataloader
for key in self.datasets
)
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
if not self.batch_sample:
return super().batch_by_size(
indices, max_tokens, max_sentences, required_batch_size_multiple
)
dataset_indices = {key: [] for key in self.datasets}
for i in indices:
_, key = self._map_index(i)
dataset_indices[key].append(i)
batches = []
for key in dataset_indices:
cur_batches = super().batch_by_size(
np.array(dataset_indices[key], dtype=np.int64),
max_tokens,
max_sentences,
required_batch_size_multiple,
)
logger.info(f"Created {len(cur_batches)} batches for dataset {key}")
batches += cur_batches
# If this dataset is used in a distributed training setup,
# then shuffle such that the order is seeded by the distributed rank
# as well
if self.distributed_rank is not None:
with data_utils.numpy_seed(self.seed, self.epoch, self.distributed_rank):
np.random.shuffle(batches)
return batches
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/multi_corpus_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
import numpy as np
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
class ConcatDataset(FairseqDataset):
@staticmethod
def cumsum(sequence, sample_ratios):
r, s = [], 0
for e, ratio in zip(sequence, sample_ratios):
curr_len = int(ratio * len(e))
r.append(curr_len + s)
s += curr_len
return r
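# Illustrative example (not part of the original class): for datasets of
# lengths [3, 5] with sample_ratios [2, 1], the scaled lengths are 6 and 5,
# so cumsum returns cumulative_sizes = [6, 11] and the concatenated dataset
# reports a total length of 11.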
def __init__(self, datasets, sample_ratios=1):
super(ConcatDataset, self).__init__()
assert len(datasets) > 0, "datasets should not be an empty iterable"
self.datasets = list(datasets)
if isinstance(sample_ratios, int):
sample_ratios = [sample_ratios] * len(self.datasets)
self.sample_ratios = sample_ratios
self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
self.real_sizes = [len(d) for d in self.datasets]
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx][sample_idx]
def _get_dataset_and_sample_index(self, idx: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
sample_idx = sample_idx % self.real_sizes[dataset_idx]
return dataset_idx, sample_idx
def collater(self, samples, **extra_args):
# For now only supports datasets with same underlying collater implementations
if hasattr(self.datasets[0], "collater"):
return self.datasets[0].collater(samples, **extra_args)
else:
return default_collate(samples, **extra_args)
def size(self, idx: int):
"""
Return an example's size as a float or tuple.
"""
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx].size(sample_idx)
def num_tokens(self, index: int):
return np.max(self.size(index))
def attr(self, attr: str, index: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
return getattr(self.datasets[dataset_idx], attr, None)
@property
def sizes(self):
_dataset_sizes = []
for ds, sr in zip(self.datasets, self.sample_ratios):
if isinstance(ds.sizes, np.ndarray):
_dataset_sizes.append(np.tile(ds.sizes, sr))
else:
# Only support underlying dataset with single size array.
assert isinstance(ds.sizes, list)
_dataset_sizes.append(np.tile(ds.sizes[0], sr))
return np.concatenate(_dataset_sizes)
@property
def supports_prefetch(self):
return all(d.supports_prefetch for d in self.datasets)
def ordered_indices(self):
"""
Returns indices sorted by length. So less padding is needed.
"""
if isinstance(self.sizes, np.ndarray) and len(self.sizes.shape) > 1:
# special handling for concatenating lang_pair_datasets
indices = np.arange(len(self))
sizes = self.sizes
tgt_sizes = (
sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
)
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
return indices[np.argsort(src_sizes[indices], kind="mergesort")]
else:
return np.argsort(self.sizes)
def prefetch(self, indices):
frm = 0
for to, ds in zip(self.cumulative_sizes, self.datasets):
real_size = len(ds)
if getattr(ds, "supports_prefetch", False):
ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to])
frm = to
@property
def can_reuse_epoch_itr_across_epochs(self):
return all(d.can_reuse_epoch_itr_across_epochs for d in self.datasets)
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, "set_epoch"):
ds.set_epoch(epoch)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/concat_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import BaseWrapperDataset
class ReplaceDataset(BaseWrapperDataset):
"""Replaces tokens found in the dataset by a specified replacement token
Args:
dataset (~torch.utils.data.Dataset): dataset to replace tokens in
replace_map(Dictionary[int,int]): map of token to replace -> replacement token
offsets (List[int]): do not replace tokens before this offset (counted from the left
if positive, from the right if negative). There should be one offset per object
returned by the underlying dataset's __getitem__ method.
"""
def __init__(self, dataset, replace_map, offsets):
super().__init__(dataset)
assert len(replace_map) > 0
self.replace_map = replace_map
self.offsets = offsets
def __getitem__(self, index):
item = self.dataset[index]
is_tuple = isinstance(item, tuple)
srcs = item if is_tuple else [item]
for offset, src in zip(self.offsets, srcs):
for k, v in self.replace_map.items():
src_off = src[offset:] if offset >= 0 else src[:offset]
src_off.masked_fill_(src_off == k, v)
item = srcs if is_tuple else srcs[0]
return item
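# Hedged usage sketch (not part of the original file): token_dataset and the
# token ids below are illustrative assumptions only.
#
#   ds = ReplaceDataset(token_dataset, replace_map={3: 7}, offsets=[1])
#   item = ds[0]   # token_dataset[0] with id 3 rewritten to 7 from index 1 on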
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/replace_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from . import FairseqDataset
def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True):
"""Backtranslate a list of samples.
Given an input (*samples*) of the form:
[{'id': 1, 'source': 'hallo welt'}]
this will return:
[{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}]
Args:
samples (List[dict]): samples to backtranslate. Individual samples are
expected to have a 'source' key, which will become the 'target'
after backtranslation.
collate_fn (callable): function to collate samples into a mini-batch
generate_fn (callable): function to generate backtranslations
cuda (bool): use GPU for generation (default: ``True``)
Returns:
List[dict]: an updated list of samples with a backtranslated source
"""
collated_samples = collate_fn(samples)
s = utils.move_to_cuda(collated_samples) if cuda else collated_samples
generated_sources = generate_fn(s)
id_to_src = {sample["id"]: sample["source"] for sample in samples}
# Go through each tgt sentence in batch and its corresponding best
# generated hypothesis and create a backtranslation data pair
# {id: id, source: generated backtranslation, target: original tgt}
return [
{
"id": id.item(),
"target": id_to_src[id.item()],
"source": hypos[0]["tokens"].cpu(),
}
for id, hypos in zip(collated_samples["id"], generated_sources)
]
class BacktranslationDataset(FairseqDataset):
"""
Sets up a backtranslation dataset which takes a tgt batch, generates
a src using a tgt-src backtranslation function (*backtranslation_fn*),
and returns the corresponding `{generated src, input tgt}` batch.
Args:
tgt_dataset (~fairseq.data.FairseqDataset): the dataset to be
backtranslated. Only the source side of this dataset will be used.
After backtranslation, the source sentences in this dataset will be
returned as the targets.
src_dict (~fairseq.data.Dictionary): the dictionary of backtranslated
sentences.
tgt_dict (~fairseq.data.Dictionary, optional): the dictionary of
sentences to be backtranslated.
backtranslation_fn (callable, optional): function to call to generate
backtranslations. This is typically the `generate` method of a
:class:`~fairseq.sequence_generator.SequenceGenerator` object.
Pass in None when it is not available at initialization time, and
use the set_backtranslation_fn method to set it when it becomes available.
output_collater (callable, optional): function to call on the
backtranslated samples to create the final batch
(default: ``tgt_dataset.collater``).
cuda: use GPU for generation
"""
def __init__(
self,
tgt_dataset,
src_dict,
tgt_dict=None,
backtranslation_fn=None,
output_collater=None,
cuda=True,
**kwargs
):
self.tgt_dataset = tgt_dataset
self.backtranslation_fn = backtranslation_fn
self.output_collater = (
output_collater if output_collater is not None else tgt_dataset.collater
)
self.cuda = cuda if torch.cuda.is_available() else False
self.src_dict = src_dict
self.tgt_dict = tgt_dict
def __getitem__(self, index):
"""
Returns a single sample from *tgt_dataset*. Note that backtranslation is
not applied in this step; use :func:`collater` instead to backtranslate
a batch of samples.
"""
return self.tgt_dataset[index]
def __len__(self):
return len(self.tgt_dataset)
def set_backtranslation_fn(self, backtranslation_fn):
self.backtranslation_fn = backtranslation_fn
def collater(self, samples):
"""Merge and backtranslate a list of samples to form a mini-batch.
Using the samples from *tgt_dataset*, load a collated target sample to
feed to the backtranslation model. Then take the backtranslation with
the best score as the source and the original input as the target.
Note: we expect *tgt_dataset* to provide a function `collater()` that
will collate samples into the format expected by *backtranslation_fn*.
After backtranslation, we will feed the new list of samples (i.e., the
`(backtranslated source, original source)` pairs) to *output_collater*
and return the result.
Args:
samples (List[dict]): samples to backtranslate and collate
Returns:
dict: a mini-batch with keys coming from *output_collater*
"""
if samples[0].get("is_dummy", False):
return samples
samples = backtranslate_samples(
samples=samples,
collate_fn=self.tgt_dataset.collater,
generate_fn=(lambda net_input: self.backtranslation_fn(net_input)),
cuda=self.cuda,
)
return self.output_collater(samples)
def num_tokens(self, index):
"""Just use the tgt dataset num_tokens"""
return self.tgt_dataset.num_tokens(index)
def ordered_indices(self):
"""Just use the tgt dataset ordered_indices"""
return self.tgt_dataset.ordered_indices()
def size(self, index):
"""Return an example's size as a float or tuple. This value is used
when filtering a dataset with ``--max-positions``.
Note: we use *tgt_dataset* to approximate the length of the source
sentence, since we do not know the actual length until after
backtranslation.
"""
tgt_size = self.tgt_dataset.size(index)[0]
return (tgt_size, tgt_size)
@property
def supports_prefetch(self):
return getattr(self.tgt_dataset, "supports_prefetch", False)
def prefetch(self, indices):
return self.tgt_dataset.prefetch(indices)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/backtranslation_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class IdDataset(FairseqDataset):
def __getitem__(self, index):
return index
def __len__(self):
return 0
def collater(self, samples):
return torch.tensor(samples)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/id_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class PrependDataset(BaseWrapperDataset):
def __init__(self, dataset, prepend_getter, ensure_first_token_is=None):
super().__init__(dataset)
self.prepend_getter = prepend_getter
self.ensure_first_token = ensure_first_token_is
def __getitem__(self, idx):
item = self.dataset[idx]
is_tuple = isinstance(item, tuple)
src = item[0] if is_tuple else item
assert self.ensure_first_token is None or src[0] == self.ensure_first_token
prepend_idx = self.prepend_getter(self.dataset, idx)
assert isinstance(prepend_idx, int)
src[0] = prepend_idx
item = tuple((src,) + item[1:]) if is_tuple else src
return item
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/prepend_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from typing import Callable, Dict, List
import numpy as np
from . import FairseqDataset
def uniform_sampler(x):
# Sample from uniform distribution
return np.random.choice(x, 1).item()
class MultiCorpusSampledDataset(FairseqDataset):
"""
Stores multiple instances of FairseqDataset together and in every iteration
creates a batch by first sampling a dataset according to a specified
probability distribution and then getting instances from that dataset.
Args:
datasets: an OrderedDict of FairseqDataset instances.
sampling_func: A function for sampling over list of dataset keys.
The default strategy is to sample uniformly.
"""
def __init__(
self,
datasets: Dict[str, FairseqDataset],
sampling_func: Callable[[List], int] = None,
):
super().__init__()
assert isinstance(datasets, OrderedDict)
self.datasets = datasets
if sampling_func is None:
sampling_func = uniform_sampler
self.sampling_func = sampling_func
self.total_num_instances = 0
for _, dataset in datasets.items():
assert isinstance(dataset, FairseqDataset)
self.total_num_instances += len(dataset)
self._ordered_indices = None
def __len__(self):
"""
Length of this dataset is the sum of individual datasets
"""
return self.total_num_instances
def ordered_indices(self):
"""
Ordered indices for batching. Here we call the underlying
dataset's ordered_indices() so that we get the same random ordering
as we would have from using the underlying dataset directly.
"""
if self._ordered_indices is None:
self._ordered_indices = OrderedDict(
[
(key, dataset.ordered_indices())
for key, dataset in self.datasets.items()
]
)
return np.arange(len(self))
def _map_index_to_dataset(self, key: int, index: int):
"""
Different underlying datasets have different lengths. In order to ensure
we are not accessing an index outside the range of the current dataset
size, we wrap around. This function should be called after we have
created an ordering for this and all underlying datasets.
"""
assert (
self._ordered_indices is not None
), "Must call MultiCorpusSampledDataset.ordered_indices() first"
mapped_index = index % len(self.datasets[key])
return self._ordered_indices[key][mapped_index]
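# Illustrative example (not part of the original class): if the dataset under
# key "a" has length 4, indices 0..3 map directly while index 5 wraps around
# to position 1, i.e. self._ordered_indices["a"][5 % 4].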
def __getitem__(self, index: int):
"""
Get the item associated with index from each underlying dataset.
Since index is in the range of [0, TotalNumInstances], we need to
map the index to the dataset before retrieving the item.
"""
return OrderedDict(
[
(key, dataset[self._map_index_to_dataset(key, index)])
for key, dataset in self.datasets.items()
]
)
def collater(self, samples: List[Dict]):
"""
Generate a mini-batch for this dataset.
To convert this into a regular mini-batch we use the following
logic:
1. Select a dataset using the specified probability distribution.
2. Call the collater function of the selected dataset.
"""
if len(samples) == 0:
return None
selected_key = self.sampling_func(list(self.datasets.keys()))
selected_samples = [sample[selected_key] for sample in samples]
return self.datasets[selected_key].collater(selected_samples)
def num_tokens(self, index: int):
"""
Return an example's length (number of tokens), used for batching. Here
we return the max across all examples at index across all underlying
datasets.
"""
return max(
dataset.num_tokens(self._map_index_to_dataset(key, index))
for key, dataset in self.datasets.items()
)
def size(self, index: int):
"""
Return an example's size as a float or tuple. Here we return the max
across all underlying datasets. This value is used when filtering a
dataset with max-positions.
"""
return max(
dataset.size(self._map_index_to_dataset(key, index))
for key, dataset in self.datasets.items()
)
@property
def supports_prefetch(self):
return all(
getattr(dataset, "supports_prefetch", False)
for dataset in self.datasets.values()
)
def prefetch(self, indices):
for key, dataset in self.datasets.items():
dataset.prefetch(
[self._map_index_to_dataset(key, index) for index in indices]
)
@property
def supports_fetch_outside_dataloader(self):
return all(
self.datasets[key].supports_fetch_outside_dataloader
for key in self.datasets
)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/multi_corpus_sampled_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import FairseqDataset
class NumSamplesDataset(FairseqDataset):
def __getitem__(self, index):
return 1
def __len__(self):
return 0
def collater(self, samples):
return sum(samples)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/num_samples_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from fairseq.data import data_utils
class WordNoising(object):
"""Generate a noisy version of a sentence, without changing words themselves."""
def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None):
self.dictionary = dictionary
self.bpe_end = None
if bpe_cont_marker:
self.bpe_end = np.array(
[
not self.dictionary[i].endswith(bpe_cont_marker)
for i in range(len(self.dictionary))
]
)
elif bpe_end_marker:
self.bpe_end = np.array(
[
self.dictionary[i].endswith(bpe_end_marker)
for i in range(len(self.dictionary))
]
)
self.get_word_idx = (
self._get_bpe_word_idx if self.bpe_end is not None else self._get_token_idx
)
def noising(self, x, lengths, noising_prob=0.0):
raise NotImplementedError()
def _get_bpe_word_idx(self, x):
"""
Given a list of BPE tokens, for every index in the tokens list,
return the index of the word grouping that it belongs to.
For example, for input x corresponding to ["how", "are", "y@@", "ou"],
return [[0], [1], [2], [2]].
"""
# x: (T x B)
bpe_end = self.bpe_end[x]
if x.size(0) == 1 and x.size(1) == 1:
# Special case when we only have one word in x. If x = [[N]],
# bpe_end is a scalar (bool) instead of a 2-dim array of bools,
# which makes the sum operation below fail.
return np.array([[0]])
# do a reduce front sum to generate word ids
word_idx = bpe_end[::-1].cumsum(0)[::-1]
word_idx = word_idx.max(0)[None, :] - word_idx
return word_idx
def _get_token_idx(self, x):
"""
This is to extend noising functions to be able to apply to non-bpe
tokens, e.g. word or characters.
"""
x = torch.t(x)
word_idx = np.array([range(len(x_i)) for x_i in x])
return np.transpose(word_idx)
class WordDropout(WordNoising):
"""Randomly drop input words. If not passing blank_idx (default is None),
then dropped words will be removed. Otherwise, it will be replaced by the
blank_idx."""
def __init__(
self,
dictionary,
default_dropout_prob=0.1,
bpe_cont_marker="@@",
bpe_end_marker=None,
):
super().__init__(dictionary, bpe_cont_marker, bpe_end_marker)
self.default_dropout_prob = default_dropout_prob
def noising(self, x, lengths, dropout_prob=None, blank_idx=None):
if dropout_prob is None:
dropout_prob = self.default_dropout_prob
# x: (T x B), lengths: B
if dropout_prob == 0:
return x, lengths
assert 0 < dropout_prob < 1
# be sure to drop entire words
word_idx = self.get_word_idx(x)
sentences = []
modified_lengths = []
for i in range(lengths.size(0)):
# Since dropout probabilities need to apply over non-pad tokens,
            # it is not trivial to generate the keep mask without considering
# input lengths; otherwise, this could be done outside the loop
# We want to drop whole words based on word_idx grouping
num_words = max(word_idx[:, i]) + 1
# ith example: [x0, x1, ..., eos, pad, ..., pad]
# We should only generate keep probs for non-EOS tokens. Thus if the
# input sentence ends in EOS, the last word idx is not included in
# the dropout mask generation and we append True to always keep EOS.
# Otherwise, just generate the dropout mask for all word idx
# positions.
has_eos = x[lengths[i] - 1, i] == self.dictionary.eos()
if has_eos: # has eos?
keep = np.random.rand(num_words - 1) >= dropout_prob
keep = np.append(keep, [True]) # keep EOS symbol
else:
keep = np.random.rand(num_words) >= dropout_prob
words = x[: lengths[i], i].tolist()
# TODO: speed up the following loop
# drop words from the input according to keep
new_s = [
w if keep[word_idx[j, i]] else blank_idx for j, w in enumerate(words)
]
new_s = [w for w in new_s if w is not None]
# we need to have at least one word in the sentence (more than the
# start / end sentence symbols)
if len(new_s) <= 1:
# insert at beginning in case the only token left is EOS
# EOS should be at end of list.
new_s.insert(0, words[np.random.randint(0, len(words))])
assert len(new_s) >= 1 and (
not has_eos # Either don't have EOS at end or last token is EOS
or (len(new_s) >= 2 and new_s[-1] == self.dictionary.eos())
), "New sentence is invalid."
sentences.append(new_s)
modified_lengths.append(len(new_s))
# re-construct input
modified_lengths = torch.LongTensor(modified_lengths)
modified_x = torch.LongTensor(
modified_lengths.max(), modified_lengths.size(0)
).fill_(self.dictionary.pad())
for i in range(modified_lengths.size(0)):
modified_x[: modified_lengths[i], i].copy_(torch.LongTensor(sentences[i]))
return modified_x, modified_lengths
class WordShuffle(WordNoising):
"""Shuffle words by no more than k positions."""
def __init__(
self,
dictionary,
default_max_shuffle_distance=3,
bpe_cont_marker="@@",
bpe_end_marker=None,
):
super().__init__(dictionary, bpe_cont_marker, bpe_end_marker)
        self.default_max_shuffle_distance = default_max_shuffle_distance
def noising(self, x, lengths, max_shuffle_distance=None):
if max_shuffle_distance is None:
max_shuffle_distance = self.default_max_shuffle_distance
# x: (T x B), lengths: B
if max_shuffle_distance == 0:
return x, lengths
        # max_shuffle_distance <= 1 would leave the sequence unchanged, so require > 1
assert max_shuffle_distance > 1
# define noise word scores
noise = np.random.uniform(
0,
max_shuffle_distance,
size=(x.size(0), x.size(1)),
)
noise[0] = -1 # do not move start sentence symbol
# be sure to shuffle entire words
word_idx = self.get_word_idx(x)
x2 = x.clone()
for i in range(lengths.size(0)):
length_no_eos = lengths[i]
if x[lengths[i] - 1, i] == self.dictionary.eos():
length_no_eos = lengths[i] - 1
# generate a random permutation
scores = word_idx[:length_no_eos, i] + noise[word_idx[:length_no_eos, i], i]
# ensure no reordering inside a word
scores += 1e-6 * np.arange(length_no_eos.item())
permutation = scores.argsort()
# shuffle words
x2[:length_no_eos, i].copy_(
x2[:length_no_eos, i][torch.from_numpy(permutation)]
)
return x2, lengths
class UnsupervisedMTNoising(WordNoising):
"""
Implements the default configuration for noising in UnsupervisedMT
(github.com/facebookresearch/UnsupervisedMT)
"""
def __init__(
self,
dictionary,
max_word_shuffle_distance,
word_dropout_prob,
word_blanking_prob,
bpe_cont_marker="@@",
bpe_end_marker=None,
):
super().__init__(dictionary)
self.max_word_shuffle_distance = max_word_shuffle_distance
self.word_dropout_prob = word_dropout_prob
self.word_blanking_prob = word_blanking_prob
self.word_dropout = WordDropout(
dictionary=dictionary,
bpe_cont_marker=bpe_cont_marker,
bpe_end_marker=bpe_end_marker,
)
self.word_shuffle = WordShuffle(
dictionary=dictionary,
bpe_cont_marker=bpe_cont_marker,
bpe_end_marker=bpe_end_marker,
)
def noising(self, x, lengths):
# 1. Word Shuffle
noisy_src_tokens, noisy_src_lengths = self.word_shuffle.noising(
x=x,
lengths=lengths,
max_shuffle_distance=self.max_word_shuffle_distance,
)
# 2. Word Dropout
noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising(
x=noisy_src_tokens,
lengths=noisy_src_lengths,
dropout_prob=self.word_dropout_prob,
)
# 3. Word Blanking
noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising(
x=noisy_src_tokens,
lengths=noisy_src_lengths,
dropout_prob=self.word_blanking_prob,
blank_idx=self.dictionary.unk(),
)
return noisy_src_tokens
class NoisingDataset(torch.utils.data.Dataset):
def __init__(
self,
src_dataset,
src_dict,
seed,
noiser=None,
noising_class=UnsupervisedMTNoising,
**kwargs
):
"""
Wrap a :class:`~torch.utils.data.Dataset` and apply noise to the
samples based on the supplied noising configuration.
Args:
            src_dataset (~torch.utils.data.Dataset): dataset to wrap. This is
                typically a LanguagePairDataset with the source dataset as the
                source and None as the target dataset. It should NOT be padded,
                so that src_lengths are calculated accurately by the
                language_pair_dataset collate function. We use
                language_pair_dataset here to encapsulate the tgt_dataset so we
                can re-use the LanguagePairDataset collater to format batches in
                the structure that SequenceGenerator expects.
src_dict (~fairseq.data.Dictionary): source dictionary
seed (int): seed to use when generating random noise
noiser (WordNoising): a pre-initialized :class:`WordNoising`
instance. If this is None, a new instance will be created using
*noising_class* and *kwargs*.
noising_class (class, optional): class to use to initialize a
default :class:`WordNoising` instance.
kwargs (dict, optional): arguments to initialize the default
:class:`WordNoising` instance given by *noiser*.
"""
self.src_dataset = src_dataset
self.src_dict = src_dict
self.seed = seed
self.noiser = (
noiser
if noiser is not None
else noising_class(
dictionary=src_dict,
**kwargs,
)
)
self.sizes = src_dataset.sizes
def __getitem__(self, index):
"""
Returns a single noisy sample. Multiple samples are fed to the collater
        to create a noising dataset batch.
"""
src_tokens = self.src_dataset[index]
src_lengths = torch.LongTensor([len(src_tokens)])
src_tokens = src_tokens.unsqueeze(0)
# Transpose src tokens to fit expected shape of x in noising function
# (batch size, sequence length) -> (sequence length, batch size)
src_tokens_t = torch.t(src_tokens)
with data_utils.numpy_seed(self.seed + index):
noisy_src_tokens = self.noiser.noising(src_tokens_t, src_lengths)
# Transpose back to expected src_tokens format
# (sequence length, 1) -> (1, sequence length)
noisy_src_tokens = torch.t(noisy_src_tokens)
return noisy_src_tokens[0]
def __len__(self):
"""
The length of the noising dataset is the length of src.
"""
return len(self.src_dataset)
@property
def supports_prefetch(self):
return self.src_dataset.supports_prefetch
def prefetch(self, indices):
if self.src_dataset.supports_prefetch:
self.src_dataset.prefetch(indices)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/noising.py |
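A minimal usage sketch for the word-level noising above (not part of the original file): it assumes a toy BPE vocabulary built with fairseq's Dictionary and shows the (T x B) layout that noising() expects.
import torch
from fairseq.data import Dictionary
from fairseq.data.noising import WordDropout

dictionary = Dictionary()
for w in ["how", "are", "y@@", "ou"]:
    dictionary.add_symbol(w)
noiser = WordDropout(dictionary)
# one BPE-tokenized sentence ("how are y@@ ou" plus eos), batch size 1
tokens = dictionary.encode_line("how are y@@ ou", add_if_not_exist=False).long()
x = tokens.unsqueeze(1)                       # shape (T x B)
lengths = torch.LongTensor([x.size(0)])
noisy_x, noisy_lengths = noiser.noising(x, lengths, dropout_prob=0.2)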
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from fairseq.data import data_utils
from . import BaseWrapperDataset
class TruncateDataset(BaseWrapperDataset):
"""Truncate a sequence by returning the first truncation_length tokens"""
def __init__(self, dataset, truncation_length):
super().__init__(dataset)
assert truncation_length is not None
self.truncation_length = truncation_length
self.dataset = dataset
def __getitem__(self, index):
item = self.dataset[index]
item_len = item.size(0)
if item_len > self.truncation_length:
item = item[: self.truncation_length]
return item
@property
def sizes(self):
return np.minimum(self.dataset.sizes, self.truncation_length)
def __len__(self):
return len(self.dataset)
class RandomCropDataset(TruncateDataset):
"""Truncate a sequence by returning a random crop of truncation_length tokens"""
def __init__(self, dataset, truncation_length, seed=1):
super().__init__(dataset, truncation_length)
self.seed = seed
self.epoch = 0
@property
def can_reuse_epoch_itr_across_epochs(self):
return True # only the crop changes, not item sizes
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, index):
with data_utils.numpy_seed(self.seed, self.epoch, index):
item = self.dataset[index]
item_len = item.size(0)
excess = item_len - self.truncation_length
if excess > 0:
start_idx = np.random.randint(0, excess)
item = item[start_idx : start_idx + self.truncation_length]
return item
def maybe_shorten_dataset(
dataset,
split,
shorten_data_split_list,
shorten_method,
tokens_per_sample,
seed,
):
truncate_split = (
split in shorten_data_split_list.split(",") or len(shorten_data_split_list) == 0
)
if shorten_method == "truncate" and truncate_split:
dataset = TruncateDataset(dataset, tokens_per_sample)
elif shorten_method == "random_crop" and truncate_split:
dataset = RandomCropDataset(dataset, tokens_per_sample, seed)
return dataset
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/shorten_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
from . import BaseWrapperDataset
logger = logging.getLogger(__name__)
class SubsampleDataset(BaseWrapperDataset):
"""Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples
Args:
dataset (~torch.utils.data.Dataset): dataset to subsample
        size_ratio (float): the ratio to subsample to. Must be between 0 and 1 (exclusive).
"""
def __init__(self, dataset, size_ratio, shuffle=False):
super().__init__(dataset)
assert size_ratio < 1
self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int)
self.indices = np.random.choice(
list(range(len(self.dataset))), self.actual_size, replace=False
)
self.shuffle = shuffle
logger.info(
"subsampled dataset from {} to {} (ratio={})".format(
len(self.dataset), self.actual_size, size_ratio
)
)
def __getitem__(self, index):
return self.dataset[self.indices[index]]
def __len__(self):
return self.actual_size
def collater(self, samples):
return self.dataset.collater(samples)
@property
def sizes(self):
return self.dataset.sizes[self.indices]
@property
def name(self):
return self.dataset.name
def num_tokens(self, index):
return self.dataset.num_tokens(self.indices[index])
def size(self, index):
return self.dataset.size(self.indices[index])
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)
def prefetch(self, indices):
self.dataset.prefetch(self.indices[indices])
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/subsample_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from . import BaseWrapperDataset
class SortDataset(BaseWrapperDataset):
def __init__(self, dataset, sort_order):
super().__init__(dataset)
if not isinstance(sort_order, (list, tuple)):
sort_order = [sort_order]
self.sort_order = sort_order
assert all(len(so) == len(dataset) for so in sort_order)
def ordered_indices(self):
return np.lexsort(self.sort_order)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/sort_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
class TextCompressionLevel(Enum):
none = 0
low = 1
high = 2
class TextCompressor(object):
def __init__(
self, level: TextCompressionLevel,
max_input_byte_length: int = 2 ** 16
):
self.level = level
self.max_input_length = max_input_byte_length
def compress(self, text: str) -> bytes:
if self.level == TextCompressionLevel.low:
import zlib
# zlib: built-in, fast
return zlib.compress(text.encode(), level=0)
elif self.level == TextCompressionLevel.high:
try:
import unishox2
# unishox2: optimized for short text but slower
except ImportError:
raise ImportError(
"Please install unishox2 for the text compression feature: "
"pip install unishox2-py3"
)
assert len(text.encode()) <= self.max_input_length
return unishox2.compress(text)[0]
else:
return text.encode()
def decompress(self, compressed: bytes) -> str:
if self.level == TextCompressionLevel.low:
import zlib
return zlib.decompress(compressed).decode()
elif self.level == TextCompressionLevel.high:
try:
import unishox2
except ImportError:
raise ImportError(
"Please install unishox2 for the text compression feature: "
"pip install unishox2-py3"
)
return unishox2.decompress(compressed, self.max_input_length)
else:
return compressed.decode()
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/text_compressor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import FairseqDataset, data_utils
def collate(samples, pad_idx, eos_idx, fixed_pad_length=None, pad_to_bsz=None):
if len(samples) == 0:
return {}
def merge(key, is_list=False):
if is_list:
res = []
for i in range(len(samples[0][key])):
res.append(
data_utils.collate_tokens(
[s[key][i] for s in samples],
pad_idx,
eos_idx,
left_pad=False,
pad_to_length=fixed_pad_length,
pad_to_bsz=pad_to_bsz,
)
)
return res
else:
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx,
left_pad=False,
pad_to_length=fixed_pad_length,
pad_to_bsz=pad_to_bsz,
)
src_tokens = merge("source")
if samples[0]["target"] is not None:
is_target_list = isinstance(samples[0]["target"], list)
target = merge("target", is_target_list)
else:
target = src_tokens
return {
"id": torch.LongTensor([s["id"] for s in samples]),
"nsentences": len(samples),
"ntokens": sum(len(s["source"]) for s in samples),
"net_input": {
"src_tokens": src_tokens,
"src_lengths": torch.LongTensor([s["source"].numel() for s in samples]),
},
"target": target,
}
class MonolingualDataset(FairseqDataset):
"""
A wrapper around torch.utils.data.Dataset for monolingual data.
Args:
dataset (torch.utils.data.Dataset): dataset to wrap
sizes (List[int]): sentence lengths
vocab (~fairseq.data.Dictionary): vocabulary
shuffle (bool, optional): shuffle the elements before batching
(default: True).
"""
def __init__(
self,
dataset,
sizes,
src_vocab,
tgt_vocab=None,
add_eos_for_other_targets=False,
shuffle=False,
targets=None,
add_bos_token=False,
fixed_pad_length=None,
pad_to_bsz=None,
src_lang_idx=None,
tgt_lang_idx=None,
):
self.dataset = dataset
self.sizes = np.array(sizes)
self.vocab = src_vocab
self.tgt_vocab = tgt_vocab or src_vocab
self.add_eos_for_other_targets = add_eos_for_other_targets
self.shuffle = shuffle
self.add_bos_token = add_bos_token
self.fixed_pad_length = fixed_pad_length
self.pad_to_bsz = pad_to_bsz
self.src_lang_idx = src_lang_idx
self.tgt_lang_idx = tgt_lang_idx
assert targets is None or all(
t in {"self", "future", "past"} for t in targets
), "targets must be none or one of 'self', 'future', 'past'"
if targets is not None and len(targets) == 0:
targets = None
self.targets = targets
def __getitem__(self, index):
if self.targets is not None:
# *future_target* is the original sentence
# *source* is shifted right by 1 (maybe left-padded with eos)
# *past_target* is shifted right by 2 (left-padded as needed)
#
# Left-to-right language models should condition on *source* and
# predict *future_target*.
# Right-to-left language models should condition on *source* and
# predict *past_target*.
source, future_target, past_target = self.dataset[index]
source, target = self._make_source_target(
source, future_target, past_target
)
else:
source = self.dataset[index]
target = None
source, target = self._maybe_add_bos(source, target)
return {"id": index, "source": source, "target": target}
def __len__(self):
return len(self.dataset)
def _make_source_target(self, source, future_target, past_target):
if self.targets is not None:
target = []
if (
self.add_eos_for_other_targets
and (("self" in self.targets) or ("past" in self.targets))
and source[-1] != self.vocab.eos()
):
# append eos at the end of source
source = torch.cat([source, source.new([self.vocab.eos()])])
if "future" in self.targets:
future_target = torch.cat(
[future_target, future_target.new([self.vocab.pad()])]
)
if "past" in self.targets:
# first token is before the start of sentence which is only used in "none" break mode when
# add_eos_for_other_targets is False
past_target = torch.cat(
[
past_target.new([self.vocab.pad()]),
past_target[1:],
source[-2, None],
]
)
for t in self.targets:
if t == "self":
target.append(source)
elif t == "future":
target.append(future_target)
elif t == "past":
target.append(past_target)
else:
raise Exception("invalid target " + t)
if len(target) == 1:
target = target[0]
else:
target = future_target
return source, self._filter_vocab(target)
def _maybe_add_bos(self, source, target):
if self.add_bos_token:
source = torch.cat([source.new([self.vocab.bos()]), source])
if target is not None:
target = torch.cat([target.new([self.tgt_vocab.bos()]), target])
return source, target
def num_tokens_vec(self, indices):
"""Return the number of tokens for a set of positions defined by indices.
This value is used to enforce ``--max-tokens`` during batching."""
return self.sizes[indices]
def _filter_vocab(self, target):
if len(self.tgt_vocab) != len(self.vocab):
def _filter(target):
mask = target.ge(len(self.tgt_vocab))
if mask.any():
target[mask] = self.tgt_vocab.unk()
return target
if isinstance(target, list):
return [_filter(t) for t in target]
return _filter(target)
return target
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the right.
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, tgt_len)`. Padding will appear
on the right.
"""
return collate(
samples,
self.vocab.pad(),
self.vocab.eos(),
self.fixed_pad_length,
self.pad_to_bsz,
)
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
self.dataset.prefetch(indices)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/monolingual_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import threading
from pathlib import Path
import numpy as np
import torch
def fasta_file_path(prefix_path):
return prefix_path + ".fasta"
class FastaDataset(torch.utils.data.Dataset):
"""
For loading protein sequence datasets in the common FASTA data format
"""
def __init__(self, path: str, cache_indices=False):
self.fn = fasta_file_path(path)
self.threadlocal = threading.local()
self.cache = Path(f"{path}.fasta.idx.npy")
if cache_indices:
if self.cache.exists():
self.offsets, self.sizes = np.load(self.cache)
else:
self.offsets, self.sizes = self._build_index(path)
np.save(self.cache, np.stack([self.offsets, self.sizes]))
else:
self.offsets, self.sizes = self._build_index(path)
def _get_file(self):
if not hasattr(self.threadlocal, "f"):
self.threadlocal.f = open(self.fn, "r")
return self.threadlocal.f
def __getitem__(self, idx):
f = self._get_file()
f.seek(self.offsets[idx])
desc = f.readline().strip()
line = f.readline()
seq = ""
while line != "" and line[0] != ">":
seq += line.strip()
line = f.readline()
return desc, seq
def __len__(self):
return self.offsets.size
def _build_index(self, path: str):
# Use grep and awk to get 100M/s on local SSD.
# Should process your enormous 100G fasta in ~10 min single core...
path = fasta_file_path(path)
bytes_offsets = subprocess.check_output(
f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
"| grep --byte-offset '^>' -o | cut -d: -f1",
shell=True,
)
fasta_lengths = subprocess.check_output(
f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
"| awk '/^>/ {print \"\";next;} { printf(\"%s\",$0);}' | tail -n+2 | awk '{print length($1)}'",
shell=True,
)
bytes_np = np.fromstring(bytes_offsets, dtype=np.int64, sep=" ")
sizes_np = np.fromstring(fasta_lengths, dtype=np.int64, sep=" ")
return bytes_np, sizes_np
def __setstate__(self, state):
self.__dict__ = state
self.threadlocal = threading.local()
def __getstate__(self):
d = {}
for i, v in self.__dict__.items():
if i != "threadlocal":
d[i] = v
return d
def __del__(self):
if hasattr(self.threadlocal, "f"):
self.threadlocal.f.close()
del self.threadlocal.f
@staticmethod
def exists(path):
return os.path.exists(fasta_file_path(path))
class EncodedFastaDataset(FastaDataset):
"""
The FastaDataset returns raw sequences - this allows us to return
indices with a dictionary instead.
"""
def __init__(self, path, dictionary):
super().__init__(path, cache_indices=True)
self.dictionary = dictionary
def __getitem__(self, idx):
desc, seq = super().__getitem__(idx)
return self.dictionary.encode_line(seq, line_tokenizer=list).long()
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/fasta_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
class BaseWrapperDataset(FairseqDataset):
def __init__(self, dataset):
super().__init__()
self.dataset = dataset
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples):
if hasattr(self.dataset, "collater"):
return self.dataset.collater(samples)
else:
return default_collate(samples)
@property
def sizes(self):
return self.dataset.sizes
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
def ordered_indices(self):
return self.dataset.ordered_indices()
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def attr(self, attr: str, index: int):
return self.dataset.attr(attr, index)
def prefetch(self, indices):
self.dataset.prefetch(indices)
def get_batch_shapes(self):
return self.dataset.get_batch_shapes()
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
return self.dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
def filter_indices_by_size(self, indices, max_sizes):
return self.dataset.filter_indices_by_size(indices, max_sizes)
@property
def can_reuse_epoch_itr_across_epochs(self):
return self.dataset.can_reuse_epoch_itr_across_epochs
def set_epoch(self, epoch):
super().set_epoch(epoch)
if hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(epoch)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/base_wrapper_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class NumelDataset(BaseWrapperDataset):
def __init__(self, dataset, reduce=False):
super().__init__(dataset)
self.reduce = reduce
def __getitem__(self, index):
item = self.dataset[index]
if torch.is_tensor(item):
return torch.numel(item)
else:
return np.size(item)
def __len__(self):
return len(self.dataset)
def collater(self, samples):
if self.reduce:
return sum(samples)
else:
return torch.tensor(samples)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/numel_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .dictionary import Dictionary, TruncatedDictionary
from .fairseq_dataset import FairseqDataset, FairseqIterableDataset
from .base_wrapper_dataset import BaseWrapperDataset
from .add_target_dataset import AddTargetDataset
from .append_token_dataset import AppendTokenDataset
from .audio.raw_audio_dataset import BinarizedAudioDataset, FileAudioDataset
from .audio.hubert_dataset import HubertDataset
from .backtranslation_dataset import BacktranslationDataset
from .bucket_pad_length_dataset import BucketPadLengthDataset
from .colorize_dataset import ColorizeDataset
from .concat_dataset import ConcatDataset
from .concat_sentences_dataset import ConcatSentencesDataset
from .denoising_dataset import DenoisingDataset
from .id_dataset import IdDataset
from .indexed_dataset import (
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
MMapIndexedDataset,
)
from .language_pair_dataset import LanguagePairDataset
from .list_dataset import ListDataset
from .lm_context_window_dataset import LMContextWindowDataset
from .lru_cache_dataset import LRUCacheDataset
from .mask_tokens_dataset import MaskTokensDataset
from .monolingual_dataset import MonolingualDataset
from .multi_corpus_sampled_dataset import MultiCorpusSampledDataset
from .nested_dictionary_dataset import NestedDictionaryDataset
from .noising import NoisingDataset
from .numel_dataset import NumelDataset
from .num_samples_dataset import NumSamplesDataset
from .offset_tokens_dataset import OffsetTokensDataset
from .pad_dataset import LeftPadDataset, PadDataset, RightPadDataset
from .prepend_dataset import PrependDataset
from .prepend_token_dataset import PrependTokenDataset
from .raw_label_dataset import RawLabelDataset
from .replace_dataset import ReplaceDataset
from .resampling_dataset import ResamplingDataset
from .roll_dataset import RollDataset
from .round_robin_zip_datasets import RoundRobinZipDatasets
from .sort_dataset import SortDataset
from .strip_token_dataset import StripTokenDataset
from .subsample_dataset import SubsampleDataset
from .token_block_dataset import TokenBlockDataset
from .transform_eos_dataset import TransformEosDataset
from .transform_eos_lang_pair_dataset import TransformEosLangPairDataset
from .shorten_dataset import TruncateDataset, RandomCropDataset
from .multilingual.sampled_multi_dataset import SampledMultiDataset
from .multilingual.sampled_multi_epoch_dataset import SampledMultiEpochDataset
from .fasta_dataset import FastaDataset, EncodedFastaDataset
from .iterators import (
CountingIterator,
EpochBatchIterator,
GroupedIterator,
ShardedIterator,
)
__all__ = [
"AddTargetDataset",
"AppendTokenDataset",
"BacktranslationDataset",
"BaseWrapperDataset",
"BinarizedAudioDataset",
"BucketPadLengthDataset",
"ColorizeDataset",
"ConcatDataset",
"ConcatSentencesDataset",
"CountingIterator",
"DenoisingDataset",
"Dictionary",
"EncodedFastaDataset",
"EpochBatchIterator",
"FairseqDataset",
"FairseqIterableDataset",
"FastaDataset",
"FileAudioDataset",
"GroupedIterator",
"HubertDataset",
"IdDataset",
"IndexedCachedDataset",
"IndexedDataset",
"IndexedRawTextDataset",
"LanguagePairDataset",
"LeftPadDataset",
"ListDataset",
"LMContextWindowDataset",
"LRUCacheDataset",
"MaskTokensDataset",
"MMapIndexedDataset",
"MonolingualDataset",
"MultiCorpusSampledDataset",
"NestedDictionaryDataset",
"NoisingDataset",
"NumelDataset",
"NumSamplesDataset",
"OffsetTokensDataset",
"PadDataset",
"PrependDataset",
"PrependTokenDataset",
"RandomCropDataset",
"RawLabelDataset",
"ResamplingDataset",
"ReplaceDataset",
"RightPadDataset",
"RollDataset",
"RoundRobinZipDatasets",
"SampledMultiDataset",
"SampledMultiEpochDataset",
"ShardedIterator",
"SortDataset",
"StripTokenDataset",
"SubsampleDataset",
"TokenBlockDataset",
"TransformEosDataset",
"TransformEosLangPairDataset",
"TruncateDataset",
"TruncatedDictionary",
]
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class ConcatSentencesDataset(FairseqDataset):
def __init__(self, *datasets):
super().__init__()
self.datasets = datasets
assert all(
len(ds) == len(datasets[0]) for ds in datasets
), "datasets must have the same length"
def __getitem__(self, index):
return torch.cat([ds[index] for ds in self.datasets])
def __len__(self):
return len(self.datasets[0])
def collater(self, samples):
return self.datasets[0].collater(samples)
@property
def sizes(self):
return sum(ds.sizes for ds in self.datasets)
def num_tokens(self, index):
return sum(ds.num_tokens(index) for ds in self.datasets)
def size(self, index):
return sum(ds.size(index) for ds in self.datasets)
def ordered_indices(self):
return self.datasets[0].ordered_indices()
@property
def supports_prefetch(self):
return any(getattr(ds, "supports_prefetch", False) for ds in self.datasets)
def prefetch(self, indices):
for ds in self.datasets:
if getattr(ds, "supports_prefetch", False):
ds.prefetch(indices)
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, "set_epoch"):
ds.set_epoch(epoch)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/concat_sentences_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import numpy as np
import torch
from fairseq.data import Dictionary, data_utils
from . import BaseWrapperDataset, LRUCacheDataset
class MaskTokensDataset(BaseWrapperDataset):
"""
A wrapper Dataset for masked language modeling.
Input items are masked according to the specified masking probability.
Args:
dataset: Dataset to wrap.
sizes: Sentence lengths
vocab: Dictionary with the vocabulary and special tokens.
pad_idx: Id of pad token in vocab
mask_idx: Id of mask token in vocab
return_masked_tokens: controls whether to return the non-masked tokens
(the default) or to return a tensor with the original masked token
IDs (and *pad_idx* elsewhere). The latter is useful as targets for
masked LM training.
seed: Seed for random number generator for reproducibility.
mask_prob: probability of replacing a token with *mask_idx*.
leave_unmasked_prob: probability that a masked token is unmasked.
random_token_prob: probability of replacing a masked token with a
random token from the vocabulary.
freq_weighted_replacement: sample random replacement words based on
word frequencies in the vocab.
mask_whole_words: only mask whole words. This should be a byte mask
over vocab indices, indicating whether it is the beginning of a
word. We will extend any mask to encompass the whole word.
bpe: BPE to use for whole-word masking.
mask_multiple_length : repeat each mask index multiple times. Default
value is 1.
mask_stdev : standard deviation of masks distribution in case of
multiple masking. Default value is 0.
"""
@classmethod
def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs):
"""Return the source and target datasets for masked LM training."""
dataset = LRUCacheDataset(dataset)
return (
LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=False)),
LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=True)),
)
def __init__(
self,
dataset: torch.utils.data.Dataset,
vocab: Dictionary,
pad_idx: int,
mask_idx: int,
return_masked_tokens: bool = False,
seed: int = 1,
mask_prob: float = 0.15,
leave_unmasked_prob: float = 0.1,
random_token_prob: float = 0.1,
freq_weighted_replacement: bool = False,
mask_whole_words: torch.Tensor = None,
mask_multiple_length: int = 1,
mask_stdev: float = 0.0,
):
assert 0.0 < mask_prob < 1.0
assert 0.0 <= random_token_prob <= 1.0
assert 0.0 <= leave_unmasked_prob <= 1.0
assert random_token_prob + leave_unmasked_prob <= 1.0
assert mask_multiple_length >= 1
assert mask_stdev >= 0.0
self.dataset = dataset
self.vocab = vocab
self.pad_idx = pad_idx
self.mask_idx = mask_idx
self.return_masked_tokens = return_masked_tokens
self.seed = seed
self.mask_prob = mask_prob
self.leave_unmasked_prob = leave_unmasked_prob
self.random_token_prob = random_token_prob
self.mask_whole_words = mask_whole_words
self.mask_multiple_length = mask_multiple_length
self.mask_stdev = mask_stdev
if random_token_prob > 0.0:
if freq_weighted_replacement:
weights = np.array(self.vocab.count)
else:
weights = np.ones(len(self.vocab))
weights[: self.vocab.nspecial] = 0
self.weights = weights / weights.sum()
self.epoch = 0
@property
def can_reuse_epoch_itr_across_epochs(self):
return True # only the noise changes, not item sizes
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, index: int):
return self.__getitem_cached__(self.seed, self.epoch, index)
@lru_cache(maxsize=8)
def __getitem_cached__(self, seed: int, epoch: int, index: int):
with data_utils.numpy_seed(self.seed, self.epoch, index):
item = self.dataset[index]
sz = len(item)
assert (
self.mask_idx not in item
), "Dataset contains mask_idx (={}), this is not expected!".format(
self.mask_idx,
)
if self.mask_whole_words is not None:
word_begins_mask = self.mask_whole_words.gather(0, item)
word_begins_idx = word_begins_mask.nonzero().view(-1)
sz = len(word_begins_idx)
words = np.split(word_begins_mask, word_begins_idx)[1:]
assert len(words) == sz
word_lens = list(map(len, words))
# decide elements to mask
mask = np.full(sz, False)
num_mask = int(
# add a random number for probabilistic rounding
self.mask_prob * sz / float(self.mask_multiple_length)
+ np.random.rand()
)
# multiple masking as described in the vq-wav2vec paper (https://arxiv.org/abs/1910.05453)
mask_idc = np.random.choice(sz, num_mask, replace=False)
if self.mask_stdev > 0.0:
lengths = np.random.normal(
self.mask_multiple_length, self.mask_stdev, size=num_mask
)
lengths = [max(0, int(round(x))) for x in lengths]
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
],
dtype=np.int64,
)
else:
mask_idc = np.concatenate(
[mask_idc + i for i in range(self.mask_multiple_length)]
)
mask_idc = mask_idc[mask_idc < len(mask)]
try:
mask[mask_idc] = True
except: # something wrong
print(
"Assigning mask indexes {} to mask {} failed!".format(
mask_idc, mask
)
)
raise
if self.return_masked_tokens:
# exit early if we're just returning the masked tokens
# (i.e., the targets for masked LM training)
if self.mask_whole_words is not None:
mask = np.repeat(mask, word_lens)
new_item = np.full(len(mask), self.pad_idx)
new_item[mask] = item[torch.from_numpy(mask.astype(np.uint8)) == 1]
return torch.from_numpy(new_item)
# decide unmasking and random replacement
rand_or_unmask_prob = self.random_token_prob + self.leave_unmasked_prob
if rand_or_unmask_prob > 0.0:
rand_or_unmask = mask & (np.random.rand(sz) < rand_or_unmask_prob)
if self.random_token_prob == 0.0:
unmask = rand_or_unmask
rand_mask = None
elif self.leave_unmasked_prob == 0.0:
unmask = None
rand_mask = rand_or_unmask
else:
unmask_prob = self.leave_unmasked_prob / rand_or_unmask_prob
decision = np.random.rand(sz) < unmask_prob
unmask = rand_or_unmask & decision
rand_mask = rand_or_unmask & (~decision)
else:
unmask = rand_mask = None
if unmask is not None:
mask = mask ^ unmask
if self.mask_whole_words is not None:
mask = np.repeat(mask, word_lens)
new_item = np.copy(item)
new_item[mask] = self.mask_idx
if rand_mask is not None:
num_rand = rand_mask.sum()
if num_rand > 0:
if self.mask_whole_words is not None:
rand_mask = np.repeat(rand_mask, word_lens)
num_rand = rand_mask.sum()
new_item[rand_mask] = np.random.choice(
len(self.vocab),
num_rand,
p=self.weights,
)
return torch.from_numpy(new_item)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/mask_tokens_dataset.py |
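A minimal masked-LM wiring sketch for MaskTokensDataset.apply_mask; the toy dictionary, the <mask> symbol and the masking probability below are assumptions, not values taken from any particular recipe.
from fairseq.data import Dictionary, ListDataset, MaskTokensDataset

vocab = Dictionary()
for w in ["hello", "world", "again"]:
    vocab.add_symbol(w)
mask_idx = vocab.add_symbol("<mask>")
items = [vocab.encode_line("hello world again", add_if_not_exist=False).long()]
base = ListDataset(items, sizes=[len(items[0])])
src, tgt = MaskTokensDataset.apply_mask(
    base,
    vocab,
    pad_idx=vocab.pad(),
    mask_idx=mask_idx,
    seed=7,
    mask_prob=0.3,
)
print(src[0])   # input ids with some positions replaced by <mask>
print(tgt[0])   # original ids at the masked positions, pad everywhere else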
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
from . import BaseWrapperDataset
class LRUCacheDataset(BaseWrapperDataset):
def __init__(self, dataset, token=None):
super().__init__(dataset)
@lru_cache(maxsize=8)
def __getitem__(self, index):
return self.dataset[index]
@lru_cache(maxsize=8)
def collater(self, samples):
return self.dataset.collater(samples)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/lru_cache_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from . import FairseqDataset, data_utils
def collate(
samples,
pad_idx,
eos_idx,
vocab,
left_pad_source=False,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
):
assert input_feeding
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx=None, # use eos_idx of each sample instead of vocab.eos()
left_pad=left_pad,
move_eos_to_beginning=move_eos_to_beginning,
pad_to_length=pad_to_length,
)
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = merge(
"source",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
# sort by descending source length
src_lengths = torch.LongTensor([s["source"].numel() for s in samples])
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get("target", None) is not None:
target = merge(
"target",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
target = target.index_select(0, sort_order)
ntokens = sum(len(s["target"]) for s in samples)
if input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
"target",
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
"target": target,
"nsentences": samples[0]["source"].size(0),
"sort_order": sort_order,
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
class DenoisingDataset(FairseqDataset):
"""
A wrapper around TokenBlockDataset for BART dataset.
Args:
dataset (TokenBlockDataset): dataset to wrap
sizes (List[int]): sentence lengths
vocab (~fairseq.data.Dictionary): vocabulary
mask_idx (int): dictionary index used for masked token
mask_whole_words: only mask whole words. This should be a byte mask
over vocab indices, indicating whether it is the beginning of a
word. We will extend any mask to encompass the whole word.
shuffle (bool, optional): shuffle the elements before batching.
Default: ``True``
seed: Seed for random number generator for reproducibility.
args: argparse arguments.
"""
def __init__(
self,
dataset,
sizes,
vocab,
mask_idx,
mask_whole_words,
shuffle,
seed,
args,
eos=None,
item_transform_func=None,
):
self.dataset = dataset
self.sizes = sizes
self.vocab = vocab
self.shuffle = shuffle
self.seed = seed
self.mask_idx = mask_idx
self.mask_whole_word = mask_whole_words
self.mask_ratio = args.mask
self.random_ratio = args.mask_random
self.insert_ratio = args.insert
self.rotate_ratio = args.rotate
self.permute_sentence_ratio = args.permute_sentences
self.eos = eos if eos is not None else vocab.eos()
self.item_transform_func = item_transform_func
if args.bpe != "gpt2":
self.full_stop_index = self.vocab.eos()
else:
assert args.bpe == "gpt2"
self.full_stop_index = self.vocab.index("13")
self.replace_length = args.replace_length
if self.replace_length not in [-1, 0, 1]:
raise ValueError(f"invalid arg: replace_length={self.replace_length}")
if args.mask_length not in ["subword", "word", "span-poisson"]:
raise ValueError(f"invalid arg: mask-length={args.mask_length}")
if args.mask_length == "subword" and args.replace_length not in [0, 1]:
raise ValueError(f"if using subwords, use replace-length=1 or 0")
self.mask_span_distribution = None
if args.mask_length == "span-poisson":
_lambda = args.poisson_lambda
lambda_to_the_k = 1
e_to_the_minus_lambda = math.exp(-_lambda)
k_factorial = 1
ps = []
for k in range(0, 128):
ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
lambda_to_the_k *= _lambda
k_factorial *= k + 1
if ps[-1] < 0.0000001:
break
ps = torch.FloatTensor(ps)
self.mask_span_distribution = torch.distributions.Categorical(ps)
self.epoch = 0
@property
def can_reuse_epoch_itr_across_epochs(self):
return True # only the noise changes, not item sizes
def set_epoch(self, epoch, **unused):
self.epoch = epoch
def __getitem__(self, index):
with data_utils.numpy_seed(self.seed, self.epoch, index):
tokens = self.dataset[index]
assert tokens[-1] == self.eos
source, target = tokens, tokens.clone()
if self.permute_sentence_ratio > 0.0:
source = self.permute_sentences(source, self.permute_sentence_ratio)
if self.mask_ratio > 0:
source = self.add_whole_word_mask(source, self.mask_ratio)
if self.insert_ratio > 0:
source = self.add_insertion_noise(source, self.insert_ratio)
if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio:
source = self.add_rolling_noise(source)
            # there can be additional changes to make:
if self.item_transform_func is not None:
source, target = self.item_transform_func(source, target)
assert (source >= 0).all()
assert (source[1:-1] >= 1).all()
assert (source <= len(self.vocab)).all()
assert source[0] == self.vocab.bos()
assert source[-1] == self.eos
return {
"id": index,
"source": source,
"target": target,
}
def __len__(self):
return len(self.dataset)
def permute_sentences(self, source, p=1.0):
full_stops = source == self.full_stop_index
# Pretend it ends with a full stop so last span is a sentence
full_stops[-2] = 1
# Tokens that are full stops, where the previous token is not
sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2
result = source.clone()
num_sentences = sentence_ends.size(0)
num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0)
substitutions = torch.randperm(num_sentences)[:num_to_permute]
ordering = torch.arange(0, num_sentences)
ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]
# Ignore <bos> at start
index = 1
for i in ordering:
sentence = source[(sentence_ends[i - 1] if i > 0 else 1) : sentence_ends[i]]
result[index : index + sentence.size(0)] = sentence
index += sentence.size(0)
return result
def word_starts(self, source):
if self.mask_whole_word is not None:
is_word_start = self.mask_whole_word.gather(0, source)
else:
is_word_start = torch.ones(source.size())
is_word_start[0] = 0
is_word_start[-1] = 0
return is_word_start
def add_whole_word_mask(self, source, p):
is_word_start = self.word_starts(source)
num_to_mask = int(math.ceil(is_word_start.float().sum() * p))
num_inserts = 0
if num_to_mask == 0:
return source
if self.mask_span_distribution is not None:
lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))
# Make sure we have enough to mask
cum_length = torch.cumsum(lengths, 0)
while cum_length[-1] < num_to_mask:
lengths = torch.cat(
[
lengths,
self.mask_span_distribution.sample(sample_shape=(num_to_mask,)),
],
dim=0,
)
cum_length = torch.cumsum(lengths, 0)
# Trim to masking budget
i = 0
while cum_length[i] < num_to_mask:
i += 1
lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])
num_to_mask = i + 1
lengths = lengths[:num_to_mask]
# Handle 0-length mask (inserts) separately
lengths = lengths[lengths > 0]
num_inserts = num_to_mask - lengths.size(0)
num_to_mask -= num_inserts
if num_to_mask == 0:
return self.add_insertion_noise(source, num_inserts / source.size(0))
assert (lengths > 0).all()
else:
lengths = torch.ones((num_to_mask,)).long()
assert is_word_start[-1] == 0
word_starts = is_word_start.nonzero(as_tuple=False)
indices = word_starts[
torch.randperm(word_starts.size(0))[:num_to_mask]
].squeeze(1)
mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio
source_length = source.size(0)
assert source_length - 1 not in indices
to_keep = torch.ones(source_length, dtype=torch.bool)
is_word_start[
-1
] = 255 # acts as a long length, so spans don't go over the end of doc
if self.replace_length == 0:
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(
1, len(self.vocab), size=(mask_random.sum(),)
)
if self.mask_span_distribution is not None:
assert len(lengths.size()) == 1
assert lengths.size() == indices.size()
lengths -= 1
while indices.size(0) > 0:
assert lengths.size() == indices.size()
lengths -= is_word_start[indices + 1].long()
uncompleted = lengths >= 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
lengths = lengths[uncompleted]
if self.replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(
1, len(self.vocab), size=(mask_random.sum(),)
)
else:
# A bit faster when all lengths are 1
while indices.size(0) > 0:
uncompleted = is_word_start[indices + 1] == 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
if self.replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(
1, len(self.vocab), size=(mask_random.sum(),)
)
assert source_length - 1 not in indices
source = source[to_keep]
if num_inserts > 0:
source = self.add_insertion_noise(source, num_inserts / source.size(0))
return source
def add_permuted_noise(self, tokens, p):
num_words = len(tokens)
num_to_permute = math.ceil(((num_words * 2) * p) / 2.0)
substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1
tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]]
return tokens
def add_rolling_noise(self, tokens):
offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1)
tokens = torch.cat(
(tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]),
dim=0,
)
return tokens
def add_insertion_noise(self, tokens, p):
if p == 0.0:
return tokens
num_tokens = len(tokens)
n = int(math.ceil(num_tokens * p))
noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
noise_mask[noise_indices] = 1
result = torch.LongTensor(n + len(tokens)).fill_(-1)
num_random = int(math.ceil(n * self.random_ratio))
result[noise_indices[num_random:]] = self.mask_idx
result[noise_indices[:num_random]] = torch.randint(
low=1, high=len(self.vocab), size=(num_random,)
)
result[~noise_mask] = tokens
assert (result >= 0).all()
return result
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch of data
"""
return collate(
samples, self.vocab.pad(), self.eos, self.vocab, pad_to_length=pad_to_length
)
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
return indices[np.argsort(self.sizes[indices], kind="mergesort")]
    def prefetch(self, indices):
        # this dataset wraps a single underlying dataset (self.dataset),
        # so prefetching is delegated to it directly
        self.dataset.prefetch(indices)
    @property
    def supports_prefetch(self):
        return getattr(self.dataset, "supports_prefetch", False)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/denoising_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import BaseWrapperDataset
class StripTokenDataset(BaseWrapperDataset):
def __init__(self, dataset, id_to_strip):
super().__init__(dataset)
self.id_to_strip = id_to_strip
def __getitem__(self, index):
item = self.dataset[index]
while len(item) > 0 and item[-1] == self.id_to_strip:
item = item[:-1]
while len(item) > 0 and item[0] == self.id_to_strip:
item = item[1:]
return item
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/strip_token_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
from fairseq.file_io import PathManager
from fairseq import utils
import os
logger = logging.getLogger(__name__)
def infer_language_pair(path):
"""Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
src, dst = None, None
for filename in PathManager.ls(path):
parts = filename.split(".")
if len(parts) >= 3 and len(parts[1].split("-")) == 2:
return parts[1].split("-")
return src, dst
def collate_tokens(
values,
pad_idx,
eos_idx=None,
left_pad=False,
move_eos_to_beginning=False,
pad_to_length=None,
pad_to_multiple=1,
pad_to_bsz=None,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
batch_size = len(values) if pad_to_bsz is None else max(len(values), pad_to_bsz)
res = values[0].new(batch_size, size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
if eos_idx is None:
# if no eos_idx is specified, then use the last token in src
dst[0] = src[-1]
else:
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)])
return res
def load_indexed_dataset(
path, dictionary=None, dataset_impl=None, combine=False, default="cached"
):
"""A helper function for loading indexed datasets.
Args:
path (str): path to indexed dataset (e.g., 'data-bin/train')
dictionary (~fairseq.data.Dictionary): data dictionary
dataset_impl (str, optional): which dataset implementation to use. If
not provided, it will be inferred automatically. For legacy indexed
data we use the 'cached' implementation by default.
combine (bool, optional): automatically load and combine multiple
datasets. For example, if *path* is 'data-bin/train', then we will
combine 'data-bin/train', 'data-bin/train1', ... and return a
single ConcatDataset instance.
"""
import fairseq.data.indexed_dataset as indexed_dataset
from fairseq.data.concat_dataset import ConcatDataset
datasets = []
for k in itertools.count():
path_k = path + (str(k) if k > 0 else "")
try:
path_k = indexed_dataset.get_indexed_dataset_to_local(path_k)
except Exception as e:
if "StorageException: [404] Path not found" in str(e):
logger.warning(f"path_k: {e} not found")
else:
raise e
dataset_impl_k = dataset_impl
if dataset_impl_k is None:
dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k)
dataset = indexed_dataset.make_dataset(
path_k,
impl=dataset_impl_k or default,
fix_lua_indexing=True,
dictionary=dictionary,
)
if dataset is None:
break
logger.info("loaded {:,} examples from: {}".format(len(dataset), path_k))
datasets.append(dataset)
if not combine:
break
if len(datasets) == 0:
return None
elif len(datasets) == 1:
return datasets[0]
else:
return ConcatDataset(datasets)
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
def collect_filtered(function, iterable, filtered):
"""
Similar to :func:`filter` but collects filtered elements in ``filtered``.
Args:
function (callable): function that returns ``False`` for elements that
should be filtered
iterable (iterable): iterable to filter
filtered (list): list to store filtered elements
"""
for el in iterable:
if function(el):
yield el
else:
filtered.append(el)
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
def compare_leq(a, b):
return a <= b if not isinstance(a, tuple) else max(a) <= b
def check_size(idx):
if isinstance(max_positions, float) or isinstance(max_positions, int):
return size_fn(idx) <= max_positions
elif isinstance(max_positions, dict):
idx_size = size_fn(idx)
assert isinstance(idx_size, dict)
intersect_keys = set(max_positions.keys()) & set(idx_size.keys())
return all(
all(
a is None or b is None or a <= b
for a, b in zip(idx_size[key], max_positions[key])
)
for key in intersect_keys
)
else:
# For MultiCorpusSampledDataset, will generalize it later
if not isinstance(size_fn(idx), Iterable):
return all(size_fn(idx) <= b for b in max_positions)
return all(
a is None or b is None or a <= b
for a, b in zip(size_fn(idx), max_positions)
)
ignored = []
itr = collect_filtered(check_size, indices, ignored)
indices = np.fromiter(itr, dtype=np.int64, count=-1)
return indices, ignored
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
"""
[deprecated] Filter indices based on their size.
Use `FairseqDataset::filter_indices_by_size` instead.
Args:
indices (List[int]): ordered list of dataset indices
dataset (FairseqDataset): fairseq dataset instance
max_positions (tuple): filter elements larger than this size.
Comparisons are done component-wise.
raise_exception (bool, optional): if ``True``, raise an exception if
any elements are filtered (default: False).
"""
warnings.warn(
"data_utils.filter_by_size is deprecated. "
"Use `FairseqDataset::filter_indices_by_size` instead.",
stacklevel=2,
)
if isinstance(max_positions, float) or isinstance(max_positions, int):
if hasattr(dataset, "sizes") and isinstance(dataset.sizes, np.ndarray):
ignored = indices[dataset.sizes[indices] > max_positions].tolist()
indices = indices[dataset.sizes[indices] <= max_positions]
elif (
hasattr(dataset, "sizes")
and isinstance(dataset.sizes, list)
and len(dataset.sizes) == 1
):
ignored = indices[dataset.sizes[0][indices] > max_positions].tolist()
indices = indices[dataset.sizes[0][indices] <= max_positions]
else:
indices, ignored = _filter_by_size_dynamic(
indices, dataset.size, max_positions
)
else:
indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions)
if len(ignored) > 0 and raise_exception:
raise Exception(
(
"Size of sample #{} is invalid (={}) since max_positions={}, "
"skip this example with --skip-invalid-size-inputs-valid-test"
).format(ignored[0], dataset.size(ignored[0]), max_positions)
)
if len(ignored) > 0:
logger.warning(
(
"{} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), max_positions, ignored[:10])
)
return indices
def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if max_sizes is None:
return indices, []
if type(max_sizes) in (int, float):
max_src_size, max_tgt_size = max_sizes, max_sizes
else:
max_src_size, max_tgt_size = max_sizes
if tgt_sizes is None:
ignored = indices[src_sizes[indices] > max_src_size]
else:
ignored = indices[
(src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size)
]
if len(ignored) > 0:
if tgt_sizes is None:
indices = indices[src_sizes[indices] <= max_src_size]
else:
indices = indices[
(src_sizes[indices] <= max_src_size)
& (tgt_sizes[indices] <= max_tgt_size)
]
return indices, ignored.tolist()
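# Illustrative usage sketch (`src_sizes`, `tgt_sizes` and `indices` are assumed
# numpy arrays of per-example lengths and candidate indices): drop pairs longer
# than 1024 tokens on either side.
#
#   kept, dropped = filter_paired_dataset_indices_by_size(
#       src_sizes, tgt_sizes, indices, max_sizes=(1024, 1024)
#   )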
def batch_by_size(
indices,
num_tokens_fn,
num_tokens_vec=None,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
fixed_shapes=None,
):
"""
Yield mini-batches of indices bucketed by size. Batches may contain
sequences of different lengths.
Args:
indices (List[int]): ordered list of dataset indices
num_tokens_fn (callable): function that returns the number of tokens at
a given index
num_tokens_vec (List[int], optional): precomputed vector of the number
of tokens for each index in indices (to enable faster batch generation)
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
required_batch_size_multiple (int, optional): require batch size to
be less than N or a multiple of N (default: 1).
fixed_shapes (List[Tuple[int, int]], optional): if given, batches will
only be created with the given shapes. *max_sentences* and
*required_batch_size_multiple* will be ignored (default: None).
"""
try:
from fairseq.data.data_utils_fast import (
batch_by_size_fn,
batch_by_size_vec,
batch_fixed_shapes_fast,
)
except ImportError:
raise ImportError(
"Please build Cython components with: "
"`python setup.py build_ext --inplace`"
)
except ValueError:
raise ValueError(
"Please build (or rebuild) Cython components with `python setup.py build_ext --inplace`."
)
# added int() to avoid TypeError: an integer is required
max_tokens = (
int(max_tokens) if max_tokens is not None else -1
)
max_sentences = max_sentences if max_sentences is not None else -1
bsz_mult = required_batch_size_multiple
if not isinstance(indices, np.ndarray):
indices = np.fromiter(indices, dtype=np.int64, count=-1)
if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray):
num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1)
if fixed_shapes is None:
if num_tokens_vec is None:
return batch_by_size_fn(
indices,
num_tokens_fn,
max_tokens,
max_sentences,
bsz_mult,
)
else:
return batch_by_size_vec(
indices,
num_tokens_vec,
max_tokens,
max_sentences,
bsz_mult,
)
else:
fixed_shapes = np.array(fixed_shapes, dtype=np.int64)
sort_order = np.lexsort(
[
fixed_shapes[:, 1].argsort(), # length
fixed_shapes[:, 0].argsort(), # bsz
]
)
fixed_shapes_sorted = fixed_shapes[sort_order]
return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted)
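# Illustrative usage sketch (`dataset` is an assumed FairseqDataset instance):
# build batches capped at 4096 tokens whose sizes are multiples of 8.
#
#   batches = batch_by_size(
#       dataset.ordered_indices(),
#       num_tokens_fn=dataset.num_tokens,
#       max_tokens=4096,
#       required_batch_size_multiple=8,
#   )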
def post_process(sentence: str, symbol: str):
if symbol == "sentencepiece":
sentence = sentence.replace(" ", "").replace("\u2581", " ").strip()
elif symbol == "wordpiece":
sentence = sentence.replace(" ", "").replace("_", " ").strip()
elif symbol == "letter":
sentence = sentence.replace(" ", "").replace("|", " ").strip()
elif symbol == "silence":
import re
sentence = sentence.replace("<SIL>", "")
sentence = re.sub(' +', ' ', sentence).strip()
elif symbol == "_EOW":
sentence = sentence.replace(" ", "").replace("_EOW", " ").strip()
elif symbol in {"subword_nmt", "@@ ", "@@"}:
if symbol == "subword_nmt":
symbol = "@@ "
sentence = (sentence + " ").replace(symbol, "").rstrip()
elif symbol == "none":
pass
elif symbol is not None:
raise NotImplementedError(f"Unknown post_process option: {symbol}")
return sentence
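# Example: undo sentencepiece segmentation, where "\u2581" marks word boundaries.
#
#   post_process("\u2581Hello \u2581world", "sentencepiece")   # -> "Hello world"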
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks. Should be of size 2, where
            the first element is the batch size and the second is the number of timesteps
        padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
        mask_prob: probability for each token to be chosen as the start of a span to be masked. This is
            multiplied by the number of timesteps divided by the mask span length, so that roughly this
            percentage of all elements is masked; due to overlaps the actual number will be smaller
            (unless no_overlap is True)
        mask_length: base length of each masked span (interpreted according to mask_type)
        mask_type: how to compute mask lengths
            static = fixed size
            uniform = sample from uniform distribution [mask_other, mask_length*2]
            normal = sample from normal distribution with mean mask_length and stdev mask_other; each mask is at least 1 element
            poisson = sample from Poisson distribution with lambda = mask_length
        min_masks: minimum number of masked spans
        no_overlap: if True, uses an alternative recursive algorithm that prevents spans from overlapping
        min_space: only used if no_overlap is True; how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
                    int,  # np.int was removed in recent NumPy releases; the built-in int is equivalent here
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask
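# Illustrative usage sketch (shape and probabilities are arbitrary): compute
# wav2vec 2.0-style span masks for a batch of 2 sequences of 100 timesteps.
#
#   mask = compute_mask_indices((2, 100), None, mask_prob=0.65, mask_length=10)
#   # mask is a (2, 100) boolean np.ndarray; True marks masked positions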
def get_mem_usage():
try:
import psutil
mb = 1024 * 1024
return f"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb"
except ImportError:
return "N/A"
# lens: torch.LongTensor
# returns: torch.BoolTensor
def lengths_to_padding_mask(lens):
bsz, max_lens = lens.size(0), torch.max(lens).item()
mask = torch.arange(max_lens).to(lens.device).view(1, max_lens)
mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens)
return mask
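# Example: positions at or beyond each length are marked as padding.
#
#   lengths_to_padding_mask(torch.tensor([3, 1]))
#   # tensor([[False, False, False],
#   #         [False,  True,  True]])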
# lens: torch.LongTensor
# returns: torch.BoolTensor
def lengths_to_mask(lens):
return ~lengths_to_padding_mask(lens)
def get_buckets(sizes, num_buckets):
buckets = np.unique(
np.percentile(
sizes,
np.linspace(0, 100, num_buckets + 1),
interpolation='lower',
)[1:]
)
return buckets
def get_bucketed_sizes(orig_sizes, buckets):
sizes = np.copy(orig_sizes)
assert np.min(sizes) >= 0
start_val = -1
for end_val in buckets:
mask = (sizes > start_val) & (sizes <= end_val)
sizes[mask] = end_val
start_val = end_val
return sizes
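# Illustrative usage sketch (the sizes are arbitrary): quantize example lengths
# into a few bucket sizes so that similarly-sized examples share a padded length.
#
#   sizes = np.array([5, 7, 9, 21, 23, 40])
#   buckets = get_buckets(sizes, num_buckets=2)   # e.g. array([ 9, 40])
#   get_bucketed_sizes(sizes, buckets)            # array([ 9,  9,  9, 40, 40, 40])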
def _find_extra_valid_paths(dataset_path: str) -> set:
paths = utils.split_paths(dataset_path)
all_valid_paths = set()
for sub_dir in paths:
contents = PathManager.ls(sub_dir)
valid_paths = [c for c in contents if re.match("valid*[0-9].*", c) is not None]
all_valid_paths |= {os.path.basename(p) for p in valid_paths}
# Remove .bin, .idx etc
roots = {os.path.splitext(p)[0] for p in all_valid_paths}
return roots
def raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None:
"""Raises if there are paths matching 'valid*[0-9].*' which are not combined or ignored."""
if (
train_cfg.dataset.ignore_unused_valid_subsets
or train_cfg.dataset.combine_valid_subsets
or train_cfg.dataset.disable_validation
or not hasattr(train_cfg.task, "data")
):
return
other_paths = _find_extra_valid_paths(train_cfg.task.data)
specified_subsets = train_cfg.dataset.valid_subset.split(",")
ignored_paths = [p for p in other_paths if p not in specified_subsets]
if ignored_paths:
advice = "Set --combine-val to combine them or --ignore-unused-valid-subsets to ignore them."
msg = f"Valid paths {ignored_paths} will be ignored. {advice}"
raise ValueError(msg)
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/data_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class PrependTokenDataset(BaseWrapperDataset):
def __init__(self, dataset, token=None):
super().__init__(dataset)
self.token = token
if token is not None:
self._sizes = np.array(dataset.sizes) + 1
else:
self._sizes = dataset.sizes
def __getitem__(self, idx):
item = self.dataset[idx]
if self.token is not None:
item = torch.cat([item.new([self.token]), item])
return item
@property
def sizes(self):
return self._sizes
def num_tokens(self, index):
n = self.dataset.num_tokens(index)
if self.token is not None:
n += 1
return n
def size(self, index):
n = self.dataset.size(index)
if self.token is not None:
n += 1
return n
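# Illustrative usage sketch (`dataset` and `dictionary` are assumed to exist):
# prepend a beginning-of-sentence symbol to every example; reported sizes grow
# by one accordingly.
#
#   dataset = PrependTokenDataset(dataset, token=dictionary.bos())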
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/prepend_token_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class TransformEosDataset(FairseqDataset):
"""A :class:`~fairseq.data.FairseqDataset` wrapper that appends/prepends/strips EOS.
Note that the transformation is applied in :func:`collater`.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to wrap
eos (int): index of the end-of-sentence symbol
append_eos_to_src (bool, optional): append EOS to the end of src
remove_eos_from_src (bool, optional): remove EOS from the end of src
append_eos_to_tgt (bool, optional): append EOS to the end of tgt
remove_eos_from_tgt (bool, optional): remove EOS from the end of tgt
"""
def __init__(
self,
dataset,
eos,
append_eos_to_src=False,
remove_eos_from_src=False,
append_eos_to_tgt=False,
remove_eos_from_tgt=False,
has_target=True,
):
if not isinstance(dataset, FairseqDataset):
raise ValueError("dataset must be an instance of FairseqDataset")
if append_eos_to_src and remove_eos_from_src:
raise ValueError("cannot combine append_eos_to_src and remove_eos_from_src")
if append_eos_to_tgt and remove_eos_from_tgt:
raise ValueError("cannot combine append_eos_to_tgt and remove_eos_from_tgt")
self.dataset = dataset
self.eos = torch.LongTensor([eos])
self.append_eos_to_src = append_eos_to_src
self.remove_eos_from_src = remove_eos_from_src
self.append_eos_to_tgt = append_eos_to_tgt
self.remove_eos_from_tgt = remove_eos_from_tgt
self.has_target = has_target
# precompute how we should adjust the reported sizes
self._src_delta = 0
self._src_delta += 1 if append_eos_to_src else 0
self._src_delta -= 1 if remove_eos_from_src else 0
self._tgt_delta = 0
self._tgt_delta += 1 if append_eos_to_tgt else 0
self._tgt_delta -= 1 if remove_eos_from_tgt else 0
self._checked_src = False
self._checked_tgt = False
def _check_src(self, src, expect_eos):
if not self._checked_src:
assert (src[-1] == self.eos[0]) == expect_eos
self._checked_src = True
def _check_tgt(self, tgt, expect_eos):
if self.has_target and not self._checked_tgt:
assert (tgt[-1] == self.eos[0]) == expect_eos
self._checked_tgt = True
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples):
def transform(item):
if self.append_eos_to_src:
self.eos = self.eos.to(device=item["source"].device)
self._check_src(item["source"], expect_eos=False)
item["source"] = torch.cat([item["source"], self.eos])
if self.remove_eos_from_src:
self.eos = self.eos.to(device=item["source"].device)
self._check_src(item["source"], expect_eos=True)
item["source"] = item["source"][:-1]
if self.append_eos_to_tgt:
self.eos = self.eos.to(device=item["target"].device)
self._check_tgt(item["target"], expect_eos=False)
item["target"] = torch.cat([item["target"], self.eos])
if self.remove_eos_from_tgt:
self.eos = self.eos.to(device=item["target"].device)
self._check_tgt(item["target"], expect_eos=True)
item["target"] = item["target"][:-1]
return item
samples = list(map(transform, samples))
return self.dataset.collater(samples)
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
if self.has_target:
src_len, tgt_len = self.dataset.size(index)
return (src_len + self._src_delta, tgt_len + self._tgt_delta)
else:
return self.dataset.size(index)
def ordered_indices(self):
# NOTE: we assume that the ordering does not change based on the
# addition or removal of eos
return self.dataset.ordered_indices()
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
return self.dataset.prefetch(indices)
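# Illustrative usage sketch (`dataset` and `dictionary` are assumed to exist):
# strip the trailing EOS from source sentences at collation time.
#
#   dataset = TransformEosDataset(
#       dataset, eos=dictionary.eos(), remove_eos_from_src=True
#   )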
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/transform_eos_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import BaseWrapperDataset
class ColorizeDataset(BaseWrapperDataset):
""" Adds 'colors' property to net input that is obtained from the provided color getter for use by models """
def __init__(self, dataset, color_getter):
super().__init__(dataset)
self.color_getter = color_getter
def collater(self, samples):
base_collate = super().collater(samples)
if len(base_collate) > 0:
base_collate["net_input"]["colors"] = torch.tensor(
list(self.color_getter(self.dataset, s["id"]) for s in samples),
dtype=torch.long,
)
return base_collate
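# Illustrative usage sketch (`dataset` is an assumed wrapped dataset): the color
# getter receives the wrapped dataset and a sample id and returns an integer
# "color"; here an arbitrary parity tag.
#
#   dataset = ColorizeDataset(dataset, color_getter=lambda ds, sample_id: sample_id % 2)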
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/colorize_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class RawLabelDataset(FairseqDataset):
def __init__(self, labels):
super().__init__()
self.labels = labels
def __getitem__(self, index):
return self.labels[index]
def __len__(self):
return len(self.labels)
def collater(self, samples):
return torch.tensor(samples)
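# Illustrative usage sketch: wrap a plain list of integer class labels, e.g. as
# the target dataset of a sentence-classification task.
#
#   labels = RawLabelDataset([0, 1, 1, 0])
#   labels.collater([labels[0], labels[1]])   # tensor([0, 1])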
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/raw_label_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import BaseWrapperDataset
class ListDataset(BaseWrapperDataset):
def __init__(self, dataset, sizes=None):
super().__init__(dataset)
self._sizes = sizes
def __iter__(self):
for x in self.dataset:
yield x
def collater(self, samples):
return samples
@property
def sizes(self):
return self._sizes
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
def set_epoch(self, epoch):
pass
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/list_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
from typing import Dict, Sequence
import numpy as np
from . import FairseqDataset, LanguagePairDataset
logger = logging.getLogger(__name__)
class RoundRobinZipDatasets(FairseqDataset):
"""Zip multiple :class:`~fairseq.data.FairseqDataset` instances together.
Shorter datasets are repeated in a round-robin fashion to match the length
of the longest one.
Args:
datasets (Dict[~fairseq.data.FairseqDataset]): a dictionary of
:class:`~fairseq.data.FairseqDataset` instances.
eval_key (str, optional): a key used at evaluation time that causes
this instance to pass-through batches from *datasets[eval_key]*.
"""
def __init__(self, datasets, eval_key=None):
super().__init__()
if isinstance(datasets, dict):
datasets = OrderedDict(datasets)
assert isinstance(datasets, OrderedDict)
assert datasets, "Can't make a RoundRobinZipDatasets out of nothing"
for dataset in datasets.values():
assert isinstance(dataset, FairseqDataset)
self.datasets = datasets
self.eval_key = eval_key
self.longest_dataset_key = max(datasets, key=lambda k: len(datasets[k]))
self.longest_dataset = datasets[self.longest_dataset_key]
self._ordered_indices: Dict[str, Sequence[int]] = None
def _map_index(self, key, index):
assert (
self._ordered_indices is not None
), "Must call RoundRobinZipDatasets.ordered_indices() first"
o = self._ordered_indices[key]
return o[index % len(o)]
def __getitem__(self, index):
if self.eval_key is None:
return OrderedDict(
[
(key, dataset[self._map_index(key, index)])
for key, dataset in self.datasets.items()
]
)
else:
# at evaluation time it's useful to pass-through batches from a single key
return self.datasets[self.eval_key][self._map_index(self.eval_key, index)]
def __len__(self):
if self._ordered_indices is not None:
return len(self._ordered_indices[self.longest_dataset_key])
return len(self.longest_dataset)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch."""
if len(samples) == 0:
return None
if self.eval_key is None:
return OrderedDict(
[
(key, dataset.collater([sample[key] for sample in samples]))
for key, dataset in self.datasets.items()
]
)
else:
# at evaluation time it's useful to pass-through batches from a single key
return self.datasets[self.eval_key].collater(samples)
def num_tokens(self, index):
"""Return an example's length (number of tokens), used for batching."""
# TODO make it configurable whether to use max() or sum() here
return max(
dataset.num_tokens(self._map_index(key, index))
for key, dataset in self.datasets.items()
)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return {
key: dataset.size(self._map_index(key, index))
for key, dataset in self.datasets.items()
}
def ordered_indices(self):
"""Ordered indices for batching."""
if self._ordered_indices is None:
# Call the underlying dataset's ordered_indices() here, so that we
# get the same random ordering as we would have from using the
# underlying sub-datasets directly.
self._ordered_indices = OrderedDict(
[
(key, dataset.ordered_indices())
for key, dataset in self.datasets.items()
]
)
return np.arange(len(self))
def filter_indices_by_size(self, indices, max_positions=None):
"""
Filter each sub-dataset independently, then update the round robin to work
on the filtered sub-datasets.
"""
def _deep_until_language_pair(dataset):
if isinstance(dataset, LanguagePairDataset):
return dataset
if hasattr(dataset, "tgt_dataset"):
return _deep_until_language_pair(dataset.tgt_dataset)
if hasattr(dataset, "dataset"):
return _deep_until_language_pair(dataset.dataset)
raise Exception(f"Don't know how to unwrap this dataset: {dataset}")
if not isinstance(max_positions, dict):
max_positions = {k: max_positions for k in self.datasets.keys()}
ignored_some = False
for key, dataset in self.datasets.items():
dataset = _deep_until_language_pair(dataset)
self._ordered_indices[key], ignored = dataset.filter_indices_by_size(
self._ordered_indices[key], max_positions[key]
)
if len(ignored) > 0:
ignored_some = True
logger.warning(
f"{len(ignored)} samples from {key} have invalid sizes and will be skipped, "
f"max_positions={max_positions[key]}, first few sample ids={ignored[:10]}"
)
# Since we are modifying in place the _ordered_indices,
# it's not possible anymore to return valid ignored indices.
# Hopefully the extra debug information print above should be enough to debug.
# Ideally we would receive ignore_invalid_inputs so that we could have
# a proper error message.
return (np.arange(len(self)), [0] if ignored_some else [])
@property
def supports_prefetch(self):
return all(
getattr(dataset, "supports_prefetch", False)
for dataset in self.datasets.values()
)
def prefetch(self, indices):
for key, dataset in self.datasets.items():
dataset.prefetch([self._map_index(key, index) for index in indices])
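# Illustrative usage sketch (the per-pair datasets are assumptions): zip two
# language-pair datasets for multilingual training; ordered_indices() must be
# called before indexing, and shorter datasets repeat round-robin.
#
#   zipped = RoundRobinZipDatasets(OrderedDict([
#       ("en-de", en_de_dataset),
#       ("en-fr", en_fr_dataset),
#   ]))
#   indices = zipped.ordered_indices()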
| EXA-1-master | exa/models/unilm-master/edgelm/fairseq/data/round_robin_zip_datasets.py |