from __future__ import division

# Optional, Tuple and Tensor are referenced in the type comments below.
from typing import Optional, Tuple

import torch
import torch.nn.functional as F
from torch import Tensor
def multi_head_attention_forward(query,                            # type: Tensor
                                 key,                              # type: Tensor
                                 value,                            # type: Tensor
                                 embed_dim_to_check,               # type: int
                                 num_heads,                        # type: int
                                 in_proj_weight,                   # type: Tensor
                                 in_proj_bias,                     # type: Tensor
                                 bias_k,                           # type: Optional[Tensor]
                                 bias_v,                           # type: Optional[Tensor]
                                 add_zero_attn,                    # type: bool
                                 dropout_p,                        # type: float
                                 out_proj_weight,                  # type: Tensor
                                 out_proj_bias,                    # type: Tensor
                                 training=True,                    # type: bool
                                 key_padding_mask=None,            # type: Optional[Tensor]
                                 need_weights=True,                # type: bool
                                 attn_mask=None,                   # type: Optional[Tensor]
                                 use_separate_proj_weight=False,   # type: bool
                                 q_proj_weight=None,               # type: Optional[Tensor]
                                 k_proj_weight=None,               # type: Optional[Tensor]
                                 v_proj_weight=None,               # type: Optional[Tensor]
                                 static_k=None,                    # type: Optional[Tensor]
                                 static_v=None                     # type: Optional[Tensor]
                                 ):
    # type: (...) -> Tuple[Tensor, Optional[Tensor]]
| r""" | |
| Args: | |
| query, key, value: map a query and a set of key-value pairs to an output. | |
| See "Attention Is All You Need" for more details. | |
| embed_dim_to_check: total dimension of the model. | |
| num_heads: parallel attention heads. | |
| in_proj_weight, in_proj_bias: input projection weight and bias. | |
| bias_k, bias_v: bias of the key and value sequences to be added at dim=0. | |
| add_zero_attn: add a new batch of zeros to the key and | |
| value sequences at dim=1. | |
| dropout_p: probability of an element to be zeroed. | |
| out_proj_weight, out_proj_bias: the output projection weight and bias. | |
| training: apply dropout if is ``True``. | |
| key_padding_mask: if provided, specified padding elements in the key will | |
| be ignored by the attention. This is an binary mask. When the value is True, | |
| the corresponding value on the attention layer will be filled with -inf. | |
| need_weights: output attn_output_weights. | |
| attn_mask: 2D or 3D mask that prevents attention to certain positions. This is an additive mask | |
| (i.e. the values will be added to the attention layer). A 2D mask will be broadcasted for all | |
| the batches while a 3D mask allows to specify a different mask for the entries of each batch. | |
| use_separate_proj_weight: the function accept the proj. weights for query, key, | |
| and value in different forms. If false, in_proj_weight will be used, which is | |
| a combination of q_proj_weight, k_proj_weight, v_proj_weight. | |
| q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. | |
| static_k, static_v: static key and value used for attention operators. | |
    Shape:
        Inputs:
        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.
        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
          3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
          S is the source sequence length.
        - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.

        Outputs:
        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
          E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
          L is the target sequence length, S is the source sequence length.
    """
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    assert key.size() == value.size()

    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
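    # Scaled dot-product factor 1/sqrt(head_dim) from "Attention Is All You Need";
    # it is applied to q below, before the q @ k^T product, which is equivalent to
    # scaling the scores themselves.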
    scaling = float(head_dim) ** -0.5
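    # in_proj_weight stacks the query, key and value projections along dim 0 as
    # [W_q; W_k; W_v], each block of size embed_dim, so slicing (or chunking the
    # projected output) recovers the three per-input projections.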
    if not use_separate_proj_weight:
        if torch.equal(query, key) and torch.equal(key, value):
            # self-attention
            q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)

        elif torch.equal(key, value):
            # encoder-decoder attention
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            if key is None:
                assert value is None
                k = None
                v = None
            else:
                # This is inline in_proj function with in_proj_weight and in_proj_bias
                _b = in_proj_bias
                _start = embed_dim
                _end = None
                _w = in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                k, v = F.linear(key, _w, _b).chunk(2, dim=-1)

        else:
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim
            _end = embed_dim * 2
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            k = F.linear(key, _w, _b)

            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim * 2
            _end = None
            _w = in_proj_weight[_start:, :]
            if _b is not None:
                _b = _b[_start:]
            v = F.linear(value, _w, _b)
    else:
        q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
        len1, len2 = q_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == query.size(-1)

        k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
        len1, len2 = k_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == key.size(-1)

        v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
        len1, len2 = v_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == value.size(-1)

        if in_proj_bias is not None:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
        else:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias)
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias)
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias)
    q = q * scaling

    if attn_mask is not None:
        if attn_mask.dim() == 2:
            attn_mask = attn_mask.unsqueeze(0)
            if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                raise RuntimeError('The size of the 2D attn_mask is not correct.')
        elif attn_mask.dim() == 3:
            if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
                raise RuntimeError('The size of the 3D attn_mask is not correct.')
        else:
            raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
        # attn_mask's dim is 3 now.
    if bias_k is not None and bias_v is not None:
        if static_k is None and static_v is None:
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = F.pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = F.pad(key_padding_mask, (0, 1))
        else:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
    else:
        assert bias_k is None
        assert bias_v is None
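    # Reshape so that every attention head becomes its own batch entry:
    # (seq_len, bsz, embed_dim) -> (bsz * num_heads, seq_len, head_dim),
    # which lets a single bmm compute all heads at once.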
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k

    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v

    src_len = k.size(1)

    if key_padding_mask is not None:
        assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len

    if add_zero_attn:
        src_len += 1
        k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = F.pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = F.pad(key_padding_mask, (0, 1))
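    # Raw attention scores: q was pre-scaled by 1/sqrt(head_dim) above, so this bmm
    # yields one (tgt_len, src_len) scaled dot-product matrix per head and batch entry.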
    attn_output_weights = torch.bmm(q, k.transpose(1, 2))
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]

    if attn_mask is not None:
        attn_output_weights += attn_mask

    if key_padding_mask is not None:
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.masked_fill(
            key_padding_mask.unsqueeze(1).unsqueeze(2),
            float('-inf'),
        )
        attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)

    attn_output_weights = F.softmax(attn_output_weights, dim=-1)
    attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training)

    attn_output = torch.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)

    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
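

# Minimal usage sketch (not part of the original function): the tensor sizes and
# random weight initialisations below are arbitrary assumptions, chosen only to
# exercise the shapes documented in the docstring for the self-attention case.
if __name__ == "__main__":
    torch.manual_seed(0)
    L, N, E, H = 5, 2, 16, 4          # target length, batch size, embed dim, heads

    x = torch.randn(L, N, E)          # self-attention: query = key = value
    in_proj_weight = torch.randn(3 * E, E) * E ** -0.5
    in_proj_bias = torch.zeros(3 * E)
    out_proj_weight = torch.randn(E, E) * E ** -0.5
    out_proj_bias = torch.zeros(E)

    # Additive causal mask: 0 on and below the diagonal, -inf strictly above it.
    causal_mask = torch.triu(torch.full((L, L), float('-inf')), diagonal=1)

    attn_output, attn_weights = multi_head_attention_forward(
        x, x, x, E, H,
        in_proj_weight, in_proj_bias,
        bias_k=None, bias_v=None, add_zero_attn=False,
        dropout_p=0.0,
        out_proj_weight=out_proj_weight, out_proj_bias=out_proj_bias,
        training=False,
        attn_mask=causal_mask,
    )
    print(attn_output.shape)   # torch.Size([5, 2, 16])  -> (L, N, E)
    print(attn_weights.shape)  # torch.Size([2, 5, 5])   -> (N, L, S)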