medmekk HF Staff committed on
Commit
e1a49f7
·
verified ·
1 Parent(s): bf4f478

Upload custom kernels

Browse files
build/torch-universal/liger_kernels/_ops.py CHANGED
@@ -1,8 +1,8 @@
1
  import torch
2
- ops = torch.ops._liger_kernels_20250507090511
3
 
4
  def add_op_namespace_prefix(op_name: str):
5
  """
6
  Prefix op by namespace.
7
  """
8
- return f"_liger_kernels_20250507090511::{op_name}"
 
1
  import torch
2
+ ops = torch.ops._liger_kernels_20250507091026
3
 
4
def add_op_namespace_prefix(op_name: str) -> str:
    """Qualify a bare operator name with this build's kernel namespace.

    Args:
        op_name: Bare operator name, e.g. ``"rms_norm"``.

    Returns:
        The fully-qualified name ``"_liger_kernels_20250507091026::<op_name>"``.
        The namespace string must stay in sync with the ``torch.ops`` binding
        (``ops = torch.ops._liger_kernels_20250507091026``) defined above.
    """
    return f"_liger_kernels_20250507091026::{op_name}"
build/torch-universal/liger_kernels/rms_norm.py CHANGED
@@ -377,13 +377,12 @@ class LigerRMSNorm(torch.nn.Module):
377
  in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
378
  """
379
 
380
- def __init__(self, hidden_size, eps=1e-6, offset=0.0, casting_mode="llama", in_place=True):
381
- super().__init__()
382
- self.weight = torch.nn.Parameter(torch.ones(hidden_size))
383
- self.variance_epsilon = eps
384
- self.offset = offset
385
- self.casting_mode = casting_mode
386
- self.in_place = in_place
387
 
388
  def forward(self, hidden_states):
389
  """
 
377
  in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
378
  """
379
 
380
+
381
+ weight: torch.Tensor
382
+ variance_epsilon: float
383
+ offset: float = 0
384
+ casting_mode: str = "llama"
385
+ in_place: bool = True
 
386
 
387
  def forward(self, hidden_states):
388
  """
torch-ext/liger_kernels/rms_norm.py CHANGED
@@ -377,13 +377,12 @@ class LigerRMSNorm(torch.nn.Module):
377
  in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
378
  """
379
 
380
- def __init__(self, hidden_size, eps=1e-6, offset=0.0, casting_mode="llama", in_place=True):
381
- super().__init__()
382
- self.weight = torch.nn.Parameter(torch.ones(hidden_size))
383
- self.variance_epsilon = eps
384
- self.offset = offset
385
- self.casting_mode = casting_mode
386
- self.in_place = in_place
387
 
388
  def forward(self, hidden_states):
389
  """
 
377
  in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
378
  """
379
 
380
+
381
+ weight: torch.Tensor
382
+ variance_epsilon: float
383
+ offset: float = 0
384
+ casting_mode: str = "llama"
385
+ in_place: bool = True
 
386
 
387
  def forward(self, hidden_states):
388
  """