# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
# coding=utf-8
# The following code has been taken from https://github.com/NVIDIA/NeMo/blob/ \
# 782b4e1652aaa43c8be390d9db0dc89544afa080/nemo/collections/nlp/modules/ \
# common/megatron/rotary_pos_embedding.py

import importlib.util

import torch
from torch import einsum, nn

__all__ = ['RotaryEmbedding', 'apply_rotary_pos_emb']

try:
    from habana_frameworks.torch.hpex.kernels import RotaryPosEmbeddingHelperV1
except ImportError:
    RotaryPosEmbeddingHelperV1 = None

# sin, cos tensors cached for all devices
cos_cached = None
sin_cached = None


class RotaryEmbedding(nn.Module):
    def __init__(self, dim):
        super().__init__()
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer('inv_freq', inv_freq)
        if importlib.util.find_spec('einops') is None:
            raise RuntimeError("einops is required for Rotary Embedding")

    def forward(self, max_seq_len, offset=0):
        seq = torch.arange(max_seq_len, device=self.inv_freq.device) + offset
        freqs = einsum('i , j -> i j', seq.type_as(self.inv_freq), self.inv_freq)
        # first part even vector components, second part odd vector components,
        # 2 * dim in dimension size
        emb = torch.cat((freqs, freqs), dim=-1)
        # emb [seq_length, .., dim]
        from einops import rearrange
        return rearrange(emb, 'n d -> n 1 1 d')
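
# Shape note (added comment): RotaryEmbedding.forward returns a tensor of shape
# [max_seq_len, 1, 1, dim], so it broadcasts against activations laid out as
# [seq_length, batch, num_heads, head_dim] -- the layout assumed by the usage
# sketch at the end of this file.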


def _rotate_half(x):
    """
    change sign so the last dimension becomes [-odd, +even]
    """
    from einops import rearrange
    x = rearrange(x, '... (j d) -> ... j d', j=2)
    x1, x2 = x.unbind(dim=-2)
    return torch.cat((-x2, x1), dim=-1)
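
# Worked example (added comment, not in the upstream file): for a last
# dimension [a, b, c, d], rearrange splits it into halves x1 = [a, b] and
# x2 = [c, d], so _rotate_half returns [-c, -d, a, b].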


def apply_rotary_pos_emb(t, freqs):
    """
    input tensor t is of shape [seq_length, ..., dim]
    rotary positional embedding tensor freqs is of shape [seq_length, ..., dim]
    check https://kexue.fm/archives/8265 for detailed formulas
    """
    rot_dim = freqs.shape[-1]
    t_pass = None
    # due to 0 dim of t_pass tensor, there is zeros tensor DMA from H2D which
    # affects performance, check whether we need t_pass
    if t.shape[-1] != rot_dim:
        # ideally t_pass is empty so rotary pos embedding is applied to all of tensor t
        t, t_pass = t[..., :rot_dim], t[..., rot_dim:]

    global cos_cached, sin_cached
    if cos_cached is None or sin_cached is None:
        cos_cached = freqs.cos().to(t.dtype)
        sin_cached = freqs.sin().to(t.dtype)

    if t.device.type == "hpu":
        assert RotaryPosEmbeddingHelperV1 is not None, "failed to import RotaryPosEmbeddingHelperV1"
        # offset already used in RotaryEmbedding.forward
        t = RotaryPosEmbeddingHelperV1.apply(t, cos_cached, sin_cached, 0)
    else:
        # first part is cosine component
        # second part is sine component, need to change signs with _rotate_half method
        t = (t * cos_cached) + (_rotate_half(t) * sin_cached)

    if t_pass is None:
        return t
    return torch.cat((t, t_pass), dim=-1)
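

# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream NeMo file).
# It assumes query/key activations laid out as [seq_length, batch, num_heads,
# head_dim]; the shapes and names below are examples only.
if __name__ == '__main__':
    seq_len, batch, heads, head_dim = 8, 2, 4, 16
    rope = RotaryEmbedding(head_dim)
    freqs = rope(seq_len)                   # [seq_len, 1, 1, head_dim]
    q = torch.randn(seq_len, batch, heads, head_dim)
    q_rot = apply_rotary_pos_emb(q, freqs)  # same shape as q
    print(q_rot.shape)                      # torch.Size([8, 2, 4, 16])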