diff --git a/build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py deleted file mode 100644 index ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from ._ops import ops - -from . import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index d3e4c45501c4bf4d1bb9c69451e5330263f00f53..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:323dbf69b89390fd46b207abc1314a4cbe27491e1bb9f026c840bc3bff43b7d3 -size 2447952 diff --git a/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py deleted file mode 100644 index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_78448fa -ops = torch.ops._activation_78448fa - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py deleted file mode 100644 index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class GeluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py deleted file mode 100644 index ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index d25d6d4e382c656ac56b436cb0a4babe828b75aa..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6146ac6e77cbd458560bf67c46d93217833f2caf08260cc80a4aa62ba5645ee9 -size 2471056 diff --git a/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py deleted file mode 100644 index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_78448fa -ops = torch.ops._activation_78448fa - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py deleted file mode 100644 index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class GeluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py deleted file mode 100644 index ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index b3e629bc4a0200b8cb36a2483ba17e085137ef0c..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:28eea3907055742f99bc9d7d4260add848adc2f6464e97029f37cd42a5c6bd0a -size 2509832 diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py deleted file mode 100644 index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_78448fa -ops = torch.ops._activation_78448fa - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py deleted file mode 100644 index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..0000000000000000000000000000000000000000 --- a/build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class GeluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py deleted file mode 100644 index ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index 348e0c950321396fd29fbb0d64996501c804e43d..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d5609ad07903b98c83c297bfb64f0d944df5edfe1c611fee23ec6c8fbd952604 -size 2440392 diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py deleted file mode 100644 index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_78448fa -ops = torch.ops._activation_78448fa - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py deleted file mode 100644 index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class GeluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py deleted file mode 100644 index ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index 2d296b21c2864cc6292a53fbdf34aabc07f2ee89..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7e6475ed603ad2cb565bd19ad2554484bd6c00d0d3f02decff60f2285df2546f -size 2463232 diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py deleted file mode 100644 index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_78448fa -ops = torch.ops._activation_78448fa - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py deleted file mode 100644 index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class GeluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py deleted file mode 100644 index ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index d1d17f12ba992ef1267c24b25d05513453b44f8d..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a0767f6dba00c543d3cb77e2044bccd32ef569abc55b921231112c8a1ddfb187 -size 2502088 diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py deleted file mode 100644 index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_78448fa -ops = torch.ops._activation_78448fa - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py deleted file mode 100644 index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..0000000000000000000000000000000000000000 --- a/build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class GeluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index 3ce2339b2c3a1a531db79f5667dc40c514ab5241..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e0c04d860454cc565113a3c93ff755fe9cbba0578c4604b89ad89e47c2503932 -size 2448056 diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..0603eccc9144bee8f9704c4236947e42c905096d --- /dev/null +++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b925dc27b6a9afd5b6d11e454275222c531a92f7ca27958ac81a78c580665e4d +size 2448088 diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..6cfb9cfa80b63852c1a9a8641b25616ce4caffd8 100644 --- a/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py +++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . 
import _activation_78448fa -ops = torch.ops._activation_78448fa +from . import _activation_e99cc09_dirty +ops = torch.ops._activation_e99cc09_dirty def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file + return f"_activation_e99cc09_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..dea45935f51421e8ee87b05430c2e95840cb4ef8 100644 --- a/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py +++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py @@ -5,6 +5,8 @@ from ._ops import ops class SiluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -14,6 +16,8 @@ class SiluAndMul(nn.Module): class GeluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -23,6 +27,8 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): + can_torch_compile: bool = True + def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -52,6 +62,8 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -59,6 +71,8 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x) diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index bf4346f6bcf6cfe9721c2f5facae07130c46de7d..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:48d7b0d190af1dd0366dbaeb0690b9c7cd1dfdc9aeda9b0b23bce56c70f5cbae -size 2509928 diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..494cce9f6166100fdb10f021911228b1cbfa2bdd --- /dev/null +++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfdbe510752b57a8dc4671f744bb0a2da5b1646e0b9a19fec02f1505ba044c8c +size 2509960 diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..6cfb9cfa80b63852c1a9a8641b25616ce4caffd8 100644 --- a/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py +++ 
b/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _activation_78448fa -ops = torch.ops._activation_78448fa +from . import _activation_e99cc09_dirty +ops = torch.ops._activation_e99cc09_dirty def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file + return f"_activation_e99cc09_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..dea45935f51421e8ee87b05430c2e95840cb4ef8 100644 --- a/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py +++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py @@ -5,6 +5,8 @@ from ._ops import ops class SiluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -14,6 +16,8 @@ class SiluAndMul(nn.Module): class GeluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -23,6 +27,8 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): + can_torch_compile: bool = True + def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -52,6 +62,8 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -59,6 +71,8 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x) diff --git a/build/torch26-cxx11-cu126-aarch64-linux/activation/__init__.py b/build/torch26-cxx11-cu126-aarch64-linux/activation/__init__.py deleted file mode 100644 index ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu126-aarch64-linux/activation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch26-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so b/build/torch26-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so deleted file mode 100755 index 9b07e72a35a8215840e52e262d1593822cd2b869..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:558e4499ad3c09d02633488cfdc802a228b78a8cd51d963c92239d44744298c7 -size 2631936 diff --git a/build/torch26-cxx11-cu126-aarch64-linux/activation/_ops.py b/build/torch26-cxx11-cu126-aarch64-linux/activation/_ops.py deleted file mode 100644 index 6244e4a74379142bc0652a345e44f788bbf5308d..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu126-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_bbdc1b4_dirty -ops = torch.ops._activation_bbdc1b4_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_bbdc1b4_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu126-aarch64-linux/activation/layers.py b/build/torch26-cxx11-cu126-aarch64-linux/activation/layers.py deleted file mode 100644 index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu126-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class GeluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index 9ce2fd90f00d4e060a60a9ac438ad3cca0d91112..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:11a11d0f4119edc5c637bab04ebd5669750a0e4f4000f58ab1bf5be2d8d9ab0b -size 2518568 diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..d18a35d3e459fa1ecfc1ca166e55cb6ac118a6bb --- /dev/null +++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70e544ad6448a5576d26147f48403f3e9e593f4a2e24167dc8acb81ce3b7932e +size 2518600 diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..6cfb9cfa80b63852c1a9a8641b25616ce4caffd8 100644 --- a/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py +++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . 
import _activation_78448fa -ops = torch.ops._activation_78448fa +from . import _activation_e99cc09_dirty +ops = torch.ops._activation_e99cc09_dirty def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file + return f"_activation_e99cc09_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..dea45935f51421e8ee87b05430c2e95840cb4ef8 100644 --- a/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py +++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py @@ -5,6 +5,8 @@ from ._ops import ops class SiluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -14,6 +16,8 @@ class SiluAndMul(nn.Module): class GeluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -23,6 +27,8 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): + can_torch_compile: bool = True + def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -52,6 +62,8 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -59,6 +71,8 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x) diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index e5810575cad84c728e3e7e44091d9a28467d76c6..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:56dcc985761e309cbef3fc2a201f26e800583128d6e5a3fc1b23800fb0b8b48c -size 2440544 diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..13989de7ff0a055c8e40e1e1f4d0a9ed9197c1fa --- /dev/null +++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60fd224c33657558f03be5be57cc8d35ade23225b1abd71557b170c8a7010cd1 +size 2440576 diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..6cfb9cfa80b63852c1a9a8641b25616ce4caffd8 100644 --- a/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py +++ 
b/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _activation_78448fa -ops = torch.ops._activation_78448fa +from . import _activation_e99cc09_dirty +ops = torch.ops._activation_e99cc09_dirty def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file + return f"_activation_e99cc09_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..dea45935f51421e8ee87b05430c2e95840cb4ef8 100644 --- a/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py +++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py @@ -5,6 +5,8 @@ from ._ops import ops class SiluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -14,6 +16,8 @@ class SiluAndMul(nn.Module): class GeluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -23,6 +27,8 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): + can_torch_compile: bool = True + def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -52,6 +62,8 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -59,6 +71,8 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x) diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index a61e6f40574131ce5866efe651db62af196eebe2..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:03c5f08322796d0736024412babe5d7f13bb1126387976ae12a80485a40d3883 -size 2502240 diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..76e8710b2a6d75f17d1c40c2ba116c096791c815 --- /dev/null +++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e364773259dc1b91f3c0d3b076da83c5a9c6ee18ffdace30315c602dffd1dabe +size 2502264 diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py index 
9599b1a485532e5c16dfb1bb9228c701ad4260a6..6cfb9cfa80b63852c1a9a8641b25616ce4caffd8 100644 --- a/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py +++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _activation_78448fa -ops = torch.ops._activation_78448fa +from . import _activation_e99cc09_dirty +ops = torch.ops._activation_e99cc09_dirty def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file + return f"_activation_e99cc09_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..dea45935f51421e8ee87b05430c2e95840cb4ef8 100644 --- a/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py +++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py @@ -5,6 +5,8 @@ from ._ops import ops class SiluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -14,6 +16,8 @@ class SiluAndMul(nn.Module): class GeluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -23,6 +27,8 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): + can_torch_compile: bool = True + def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -52,6 +62,8 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -59,6 +71,8 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x) diff --git a/build/torch26-cxx98-cu126-aarch64-linux/activation/__init__.py b/build/torch26-cxx98-cu126-aarch64-linux/activation/__init__.py deleted file mode 100644 index ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-aarch64-linux/activation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch26-cxx98-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so b/build/torch26-cxx98-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so deleted file mode 100755 index b19813010320a7b7df823587e9cf0b78a0c7f760..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f6afd50526ff4221cddd52cb947900cdf6bb95ad0a6bffcd1a86bda4d3f52349 -size 2628128 diff --git a/build/torch26-cxx98-cu126-aarch64-linux/activation/_ops.py b/build/torch26-cxx98-cu126-aarch64-linux/activation/_ops.py deleted file mode 100644 index 6244e4a74379142bc0652a345e44f788bbf5308d..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_bbdc1b4_dirty -ops = torch.ops._activation_bbdc1b4_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_bbdc1b4_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu126-aarch64-linux/activation/layers.py b/build/torch26-cxx98-cu126-aarch64-linux/activation/layers.py deleted file mode 100644 index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class GeluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index 5967a78d0929f911fb8a64d92bf379f6258edc47..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f6eae5c895c564fbd2524ce488f4e91e65dc63402cd41a8bc74474b7437b2e62 -size 2506784 diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..92433b08d2ef878a9d6fc7dfd5281051412ea0b3 --- /dev/null +++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ac88cc0d3c65ab283d20608f3a097be29ee572e7856f10f8d7919536efd95b4 +size 2506808 diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..6cfb9cfa80b63852c1a9a8641b25616ce4caffd8 100644 --- a/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py +++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . 
import _activation_78448fa -ops = torch.ops._activation_78448fa +from . import _activation_e99cc09_dirty +ops = torch.ops._activation_e99cc09_dirty def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file + return f"_activation_e99cc09_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..dea45935f51421e8ee87b05430c2e95840cb4ef8 100644 --- a/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py +++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py @@ -5,6 +5,8 @@ from ._ops import ops class SiluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -14,6 +16,8 @@ class SiluAndMul(nn.Module): class GeluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -23,6 +27,8 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): + can_torch_compile: bool = True + def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -52,6 +62,8 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -59,6 +71,8 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x) diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index 0b9f449b2c357ba217eefceca0f50ffc270df387..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f8086b2d9e0f2db80385b83e0bc28f8d158725d002e1613e1a46a87732197e9f -size 2448152 diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so b/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..16e9df58edb8b6fe6885a0ed783306390db853b1 --- /dev/null +++ b/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4f9e647eea40d3d3801d5ee57d4917e4c2e8dbfd87cdfebdc40b1b0a1c571fe +size 2448184 diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..6cfb9cfa80b63852c1a9a8641b25616ce4caffd8 100644 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py +++ 
b/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _activation_78448fa -ops = torch.ops._activation_78448fa +from . import _activation_e99cc09_dirty +ops = torch.ops._activation_e99cc09_dirty def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file + return f"_activation_e99cc09_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..dea45935f51421e8ee87b05430c2e95840cb4ef8 100644 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py +++ b/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py @@ -5,6 +5,8 @@ from ._ops import ops class SiluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -14,6 +16,8 @@ class SiluAndMul(nn.Module): class GeluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -23,6 +27,8 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): + can_torch_compile: bool = True + def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -52,6 +62,8 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -59,6 +71,8 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x) diff --git a/build/torch27-cxx11-cu126-aarch64-linux/activation/__init__.py b/build/torch27-cxx11-cu126-aarch64-linux/activation/__init__.py deleted file mode 100644 index ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-aarch64-linux/activation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch27-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so b/build/torch27-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so deleted file mode 100755 index a26b59f33b9ede8cc6088d362932a3a950705ef9..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4210a6598f00b8921ecba1a0e24603eb05437a876ca1f473d2641e11d9a67ece -size 2632160 diff --git a/build/torch27-cxx11-cu126-aarch64-linux/activation/_ops.py b/build/torch27-cxx11-cu126-aarch64-linux/activation/_ops.py deleted file mode 100644 index 6244e4a74379142bc0652a345e44f788bbf5308d..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_bbdc1b4_dirty -ops = torch.ops._activation_bbdc1b4_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_bbdc1b4_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu126-aarch64-linux/activation/layers.py b/build/torch27-cxx11-cu126-aarch64-linux/activation/layers.py deleted file mode 100644 index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class GeluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index 565f878ddcee0b2d24c30526b0975f5195e2806a..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:22ed530294eb70c8261e581615bd9da0d2dc1ba8c3f0dcc3696cff9be62580cb -size 2518600 diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so b/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..085ef09ae9488945275424ee7a507f0289143ed8 --- /dev/null +++ b/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2b72ff2a0f2253e4dfe028842b5f15cabf2647d7812bf4662a2de510ca0c489 +size 2518632 diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..6cfb9cfa80b63852c1a9a8641b25616ce4caffd8 100644 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py +++ b/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . 
import _activation_78448fa -ops = torch.ops._activation_78448fa +from . import _activation_e99cc09_dirty +ops = torch.ops._activation_e99cc09_dirty def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file + return f"_activation_e99cc09_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..dea45935f51421e8ee87b05430c2e95840cb4ef8 100644 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py +++ b/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py @@ -5,6 +5,8 @@ from ._ops import ops class SiluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -14,6 +16,8 @@ class SiluAndMul(nn.Module): class GeluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -23,6 +27,8 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): + can_torch_compile: bool = True + def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -52,6 +62,8 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -59,6 +71,8 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x) diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py b/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py deleted file mode 100644 index ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so b/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so deleted file mode 100755 index df74edb6da28103fb058acf976502b52d10294c8..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d669f5fb8675b5a8f7511a16c6c61148d3701169bf5c5469159b12001eacfbac -size 3418712 diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py b/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py deleted file mode 100644 index 6244e4a74379142bc0652a345e44f788bbf5308d..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_bbdc1b4_dirty -ops = torch.ops._activation_bbdc1b4_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_bbdc1b4_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py b/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py deleted file mode 100644 index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class GeluAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_78448fa.abi3.so b/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_78448fa.abi3.so deleted file mode 100755 index 682fe4248f9bfea5d25d30b6f4cbcd7590a5c3d3..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_78448fa.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5d168f3ecfc9539e9a2f0af0a5f533bd958682efd1cc5bd716a964d8f1b6f679 -size 3331432 diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so b/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..ea1a9f1b610a4e3ca23afc5e13c26c3e0ef7758b --- /dev/null +++ b/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4590c852899e4c11ddb74cfad61e26b07490a91f3c09e0fb0874a3fcc1f533e +size 3331456 diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py index 9599b1a485532e5c16dfb1bb9228c701ad4260a6..6cfb9cfa80b63852c1a9a8641b25616ce4caffd8 100644 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py +++ b/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . 
import _activation_78448fa -ops = torch.ops._activation_78448fa +from . import _activation_e99cc09_dirty +ops = torch.ops._activation_e99cc09_dirty def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_78448fa::{op_name}" \ No newline at end of file + return f"_activation_e99cc09_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py index 99c129e3b1c9ed4c18166d5b5d67eb08f137a27f..dea45935f51421e8ee87b05430c2e95840cb4ef8 100644 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py +++ b/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py @@ -5,6 +5,8 @@ from ._ops import ops class SiluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -14,6 +16,8 @@ class SiluAndMul(nn.Module): class GeluAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -23,6 +27,8 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): + can_torch_compile: bool = True + def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -52,6 +62,8 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -59,6 +71,8 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): + can_torch_compile: bool = True + def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x)
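Across every build variant touched above, the modified `_ops.py` and `layers.py` files make the same two changes: the op namespace moves from `_activation_78448fa` to `_activation_e99cc09_dirty` (matching the newly added `.abi3.so`), and each activation module gains a `can_torch_compile: bool = True` class attribute. The sketch below shows how these layers are typically exercised; it assumes the package from one of these build directories is importable as `activation` and that a CUDA device is available, and the tensor shapes and the `torch.compile` wrapper are illustrative assumptions rather than anything taken from the diff.

```python
import torch

from activation import layers

# The custom kernels are CUDA ops, so this sketch needs a GPU.
# Shapes are illustrative only.
x = torch.randn(8, 2048, device="cuda", dtype=torch.float16)

# SiluAndMul splits the last dimension in half, applies SiLU to the first
# half, multiplies it by the second half, and writes the result into a
# freshly allocated (8, 1024) output tensor via ops.silu_and_mul(out, x).
silu_mul = layers.SiluAndMul()
out = silu_mul(x)

# `can_torch_compile` is a plain class flag added in these hunks; a caller
# may use it to decide whether wrapping the module in torch.compile is
# expected to work. Whether anything checks it is up to the framework.
if getattr(silu_mul, "can_torch_compile", False):
    out_compiled = torch.compile(silu_mul)(x)
```

The companion `_ops.py` hunks simply re-point the `ops` handle at the rebuilt shared object, so the same calls resolve under `torch.ops._activation_e99cc09_dirty` instead of `torch.ops._activation_78448fa`, and `add_op_namespace_prefix("silu_and_mul")` now yields `"_activation_e99cc09_dirty::silu_and_mul"` for code that needs to refer to an op by its hash-suffixed namespace.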