Dataset schema (field: type, observed value/length range):

entry_point: string (lengths 1 to 65)
original_triton_code: string (lengths 4.5k to 619k)
python_code: string (lengths 208 to 60.9k)
triton_code: string (lengths 1.15k to 275k)
repo_name: string (lengths 7 to 115)
module_name: string (lengths 1 to 65)
synthetic: bool (1 class)
uuid: int64 (0 to 18.5k)
licenses: list (lengths 1 to 6)
stars: int64 (0 to 19.8k)
sha: string (length 40)
repo_link: string (lengths 72 to 180)
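The rows below follow this schema. A minimal loading sketch, assuming this dump corresponds to a Hugging Face dataset; "DATASET_ID" is a placeholder, since the dataset's repository id is not stated here:

from datasets import load_dataset

# "DATASET_ID" is hypothetical; the field names match the schema above.
ds = load_dataset("DATASET_ID", split="train")
row = ds[0]
print(row["entry_point"], row["module_name"], row["repo_name"], row["stars"])
print(len(row["original_triton_code"]), len(row["python_code"]), len(row["triton_code"]))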
entry_point: ModulatedToRGB

original_triton_code:
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/sr/csrvuu3ij7ffjialtfbzxloffj4ibh54xghkpxfw6spkwver6dc5.py # Topologically Sorted Source Nodes: [sqrt, mul_1, weight], Original ATen: [aten.sqrt, aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # sqrt => full_default_1 # weight => mul_2 # Graph fragment: # %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %full_default_1), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, 1.0), kwargs = {}) triton_poi_fused_mul_sqrt_0 = async_compile.triton('triton_poi_fused_mul_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, 
XBLOCK : tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bf/cbfu2sd3446jjdimn3bzgkb5uzjci2fpr7jzqru5ljty6r24kzjc.py # Topologically Sorted Source Nodes: [sqrt, mul_4, weight_1], Original ATen: [aten.sqrt, aten.mul] # Source node to ATen node mapping: # mul_4 => mul_4 # sqrt => full_default_1 # weight_1 => mul_5 # Graph fragment: # %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %full_default_1), kwargs = {}) # %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, 1.0), kwargs = {}) triton_poi_fused_mul_sqrt_1 = async_compile.triton('triton_poi_fused_mul_sqrt_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sqrt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sqrt_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xh/cxhhbqk3s6iqrz6tqomrot2zckdii7iarr3pj5stdt3ds6rbfufc.py # Topologically Sorted Source Nodes: [style, weight_2], Original ATen: [aten.add, aten.mul] # Source node to ATen node mapping: # style => add_1 # weight_2 => mul_7 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, 0.0), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_1), kwargs = {}) triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = (xindex // 12) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = 0.0 tmp7 = tmp5 + tmp6 tmp8 = tmp0 * tmp7 tl.store(out_ptr0 + (x4), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/go/cgoav6av4bzem4wmdmkiowlmjpeiubwc67bqu6es4aivwlfpxzhh.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.add] # Source node to ATen node mapping: # out => add_2 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_6), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sqrt, mul_1, weight], Original ATen: [aten.sqrt, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sqrt_0.run(primals_1, buf0, 12, grid=grid(12), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sqrt, mul_4, weight_1], Original ATen: [aten.sqrt, aten.mul] triton_poi_fused_mul_sqrt_1.run(primals_4, buf1, 16, grid=grid(16), stream=stream0) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm] extern_kernels.mm(primals_3, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [style, weight_2], Original ATen: [aten.add, aten.mul] triton_poi_fused_add_mul_2.run(buf0, buf2, primals_5, buf3, 48, grid=grid(48), stream=stream0) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf5, primals_6, 192, grid=grid(192), stream=stream0) del primals_6 return (buf5, buf0, buf1, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_2, (1, 16, 4, 4), (256, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 3, 4, 1, 1), (12, 4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) 
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch import torch.nn.functional as F from copy import deepcopy import torch.nn as nn from functools import partial from torch.nn.init import _calculate_correct_fan def upsample(in_tens, out_H=64): """Upsamples the input to the given size. Args: in_tens (Tensor): Tensor with shape [N, C, H, W]. out_H (int, optional): Output spatial size. Defaults to 64. Returns: Tensor: Output Tensor. """ in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module def _make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. 
mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' ) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModule(nn.Module): """Equalized LR Linear Module with Activation Layer. This module is modified from ``EqualizedLRLinearModule`` defined in PGGAN. The major features updated in this module is adding support for activation layers used in StyleGAN2. Args: equalized_lr_cfg (dict | None, optional): Config for equalized lr. Defaults to dict(gain=1., lr_mul=1.). bias (bool, optional): Whether to use bias item. Defaults to True. bias_init (float, optional): The value for bias initialization. Defaults to ``0.``. act_cfg (dict | None, optional): Config for activation layer. Defaults to None. """ def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super().__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, ...). Returns: Tensor: Output feature map. 
""" if x.ndim >= 3: x = x.reshape(x.size(0), -1) x = self.linear(x) if self.with_activation and self.act_type == 'fused_bias': x = self.activate(x, self.bias * self.lr_mul) elif self.bias is not None and self.with_activation: x = self.activate(x + self.bias * self.lr_mul) elif self.bias is not None: x = x + self.bias * self.lr_mul elif self.with_activation: x = self.activate(x) return x class Blur(nn.Module): """Blur module. This module is adopted rightly after upsampling operation in StyleGAN2. Args: kernel (Array): Blur kernel/filter used in UpFIRDn. pad (list[int]): Padding for features. upsample_factor (int, optional): Upsampling factor. Defaults to 1. """ def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = _make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, H, W). Returns: Tensor: Output feature map. """ return upfirdn2d(x, self.kernel, pad=self.pad) class ModulatedConv2d(nn.Module): """Modulated Conv2d in StyleGANv2. This module implements the modulated convolution layers proposed in StyleGAN2. Details can be found in Analyzing and Improving the Image Quality of StyleGAN, CVPR2020. Args: in_channels (int): Input channels. out_channels (int): Output channels. kernel_size (int): Kernel size, same as :obj:`nn.Con2d`. style_channels (int): Channels for the style codes. demodulate (bool, optional): Whether to adopt demodulation. Defaults to True. upsample (bool, optional): Whether to adopt upsampling in features. Defaults to False. downsample (bool, optional): Whether to adopt downsampling in features. Defaults to False. blur_kernel (list[int], optional): Blurry kernel. Defaults to [1, 3, 3, 1]. equalized_lr_cfg (dict | None, optional): Configs for equalized lr. Defaults to dict(mode='fan_in', lr_mul=1., gain=1.). style_mod_cfg (dict, optional): Configs for style modulation module. Defaults to dict(bias_init=1.). style_bias (float, optional): Bias value for style code. Defaults to 0.. eps (float, optional): Epsilon value to avoid computation error. Defaults to 1e-8. 
""" def __init__(self, in_channels, out_channels, kernel_size, style_channels, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1], equalized_lr_cfg=dict(mode='fan_in', lr_mul=1.0, gain=1.0), style_mod_cfg=dict(bias_init=1.0), style_bias=0.0, eps=1e-08): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.style_channels = style_channels self.demodulate = demodulate assert isinstance(self.kernel_size, int) and (self.kernel_size >= 1 and self.kernel_size % 2 == 1) self.upsample = upsample self.downsample = downsample self.style_bias = style_bias self.eps = eps style_mod_cfg = dict() if style_mod_cfg is None else style_mod_cfg self.style_modulation = EqualLinearActModule(style_channels, in_channels, **style_mod_cfg) lr_mul_ = 1.0 if equalized_lr_cfg is not None: lr_mul_ = equalized_lr_cfg.get('lr_mul', 1.0) self.weight = nn.Parameter(torch.randn(1, out_channels, in_channels, kernel_size, kernel_size).div_(lr_mul_)) if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, (pad0, pad1), upsample_factor=factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) if equalized_lr_cfg is not None: equalized_lr(self, **equalized_lr_cfg) self.padding = kernel_size // 2 def forward(self, x, style): n, c, h, w = x.shape style = self.style_modulation(style).view(n, 1, c, 1, 1 ) + self.style_bias weight = self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) weight = weight * demod.view(n, self.out_channels, 1, 1, 1) weight = weight.view(n * self.out_channels, c, self.kernel_size, self.kernel_size) if self.upsample: x = x.reshape(1, n * c, h, w) weight = weight.view(n, self.out_channels, c, self.kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(n * c, self. out_channels, self.kernel_size, self.kernel_size) x = F.conv_transpose2d(x, weight, padding=0, stride=2, groups=n) x = x.reshape(n, self.out_channels, *x.shape[-2:]) x = self.blur(x) elif self.downsample: x = self.blur(x) x = x.view(1, n * self.in_channels, *x.shape[-2:]) x = F.conv2d(x, weight, stride=2, padding=0, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) else: x = x.view(1, n * c, h, w) x = F.conv2d(x, weight, stride=1, padding=self.padding, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) return x class UpsampleUpFIRDn(nn.Module): """UpFIRDn for Upsampling. This module is used in the ``to_rgb`` layers in StyleGAN2 for upsampling the images. Args: kernel (Array): Blur kernel/filter used in UpFIRDn. factor (int, optional): Upsampling factor. Defaults to 2. """ def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = _make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, H, W). Returns: Tensor: Output feature map. """ out = upfirdn2d(x, self.kernel, up=self.factor, down=1, pad=self.pad) return out class ModulatedToRGB(nn.Module): """To RGB layer. This module is designed to output image tensor in StyleGAN2. Args: in_channels (int): Input channels. style_channels (int): Channels for the style codes. 
out_channels (int, optional): Output channels. Defaults to 3. upsample (bool, optional): Whether to adopt upsampling in features. Defaults to False. blur_kernel (list[int], optional): Blurry kernel. Defaults to [1, 3, 3, 1]. style_mod_cfg (dict, optional): Configs for style modulation module. Defaults to dict(bias_init=1.). style_bias (float, optional): Bias value for style code. Defaults to 0.. """ def __init__(self, in_channels, style_channels, out_channels=3, upsample=True, blur_kernel=[1, 3, 3, 1], style_mod_cfg=dict( bias_init=1.0), style_bias=0.0): super().__init__() if upsample: self.upsample = UpsampleUpFIRDn(blur_kernel) self.conv = ModulatedConv2d(in_channels, out_channels=out_channels, kernel_size=1, style_channels=style_channels, demodulate=False, style_mod_cfg=style_mod_cfg, style_bias=style_bias) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, x, style, skip=None): """Forward Function. Args: x ([Tensor): Input features with shape of (N, C, H, W). style (Tensor): Style latent with shape of (N, C). skip (Tensor, optional): Tensor for skip link. Defaults to None. Returns: Tensor: Output features with shape of (N, C, H, W) """ out = self.conv(x, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'style_channels': 4}]
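The eager module above can be sanity-checked with the row's own helpers. A minimal CPU sketch, assuming the definitions in this field are in scope; note that upfirdn2d, fused_bias_leakyrelu, and build_activation_layer are referenced but not defined in the snippet (they come from mmcv in the upstream repo), yet the default path below never reaches them, since skip=None bypasses UpsampleUpFIRDn and act_cfg=None bypasses the activation branch:

import torch

# Construct the module and inputs exactly as the dataset's helpers specify.
init_args, init_kwargs = get_init_inputs()
to_rgb = ModulatedToRGB(*init_args, **init_kwargs)
x, style = get_inputs()
with torch.no_grad():
    out = to_rgb(x, style)
print(out.shape)  # torch.Size([4, 3, 4, 4])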
triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F from copy import deepcopy import torch.nn as nn from functools import partial from torch.nn.init import _calculate_correct_fan assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_mul_sqrt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = 0.0 tmp7 = tmp5 + tmp6 tmp8 = tmp0 * tmp7 tl.store(out_ptr0 + x4, tmp8, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. float32) get_raw_stream(0) triton_poi_fused_mul_sqrt_0[grid(12)](primals_1, buf0, 12, XBLOCK= 16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_sqrt_1[grid(16)](primals_4, buf1, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf1, (4, 4), (1, 4 ), 0), out=buf2) buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_add_mul_2[grid(48)](buf0, buf2, primals_5, buf3, 48, XBLOCK=64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(192)](buf5, primals_6, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return (buf5, buf0, buf1, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_2, (1, 16, 4, 4), (256, 16, 4, 1), 0)) def upsample(in_tens, out_H=64): """Upsamples the input to the given size. Args: in_tens (Tensor): Tensor with shape [N, C, H, W]. out_H (int, optional): Output spatial size. Defaults to 64. Returns: Tensor: Output Tensor. """ in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module def _make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. 
""" weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' ) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModule(nn.Module): """Equalized LR Linear Module with Activation Layer. This module is modified from ``EqualizedLRLinearModule`` defined in PGGAN. The major features updated in this module is adding support for activation layers used in StyleGAN2. Args: equalized_lr_cfg (dict | None, optional): Config for equalized lr. Defaults to dict(gain=1., lr_mul=1.). bias (bool, optional): Whether to use bias item. Defaults to True. bias_init (float, optional): The value for bias initialization. Defaults to ``0.``. act_cfg (dict | None, optional): Config for activation layer. Defaults to None. 
""" def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super().__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, ...). Returns: Tensor: Output feature map. """ if x.ndim >= 3: x = x.reshape(x.size(0), -1) x = self.linear(x) if self.with_activation and self.act_type == 'fused_bias': x = self.activate(x, self.bias * self.lr_mul) elif self.bias is not None and self.with_activation: x = self.activate(x + self.bias * self.lr_mul) elif self.bias is not None: x = x + self.bias * self.lr_mul elif self.with_activation: x = self.activate(x) return x class Blur(nn.Module): """Blur module. This module is adopted rightly after upsampling operation in StyleGAN2. Args: kernel (Array): Blur kernel/filter used in UpFIRDn. pad (list[int]): Padding for features. upsample_factor (int, optional): Upsampling factor. Defaults to 1. """ def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = _make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, H, W). Returns: Tensor: Output feature map. """ return upfirdn2d(x, self.kernel, pad=self.pad) class ModulatedConv2d(nn.Module): """Modulated Conv2d in StyleGANv2. This module implements the modulated convolution layers proposed in StyleGAN2. Details can be found in Analyzing and Improving the Image Quality of StyleGAN, CVPR2020. Args: in_channels (int): Input channels. out_channels (int): Output channels. kernel_size (int): Kernel size, same as :obj:`nn.Con2d`. style_channels (int): Channels for the style codes. demodulate (bool, optional): Whether to adopt demodulation. Defaults to True. upsample (bool, optional): Whether to adopt upsampling in features. Defaults to False. downsample (bool, optional): Whether to adopt downsampling in features. Defaults to False. blur_kernel (list[int], optional): Blurry kernel. Defaults to [1, 3, 3, 1]. equalized_lr_cfg (dict | None, optional): Configs for equalized lr. Defaults to dict(mode='fan_in', lr_mul=1., gain=1.). style_mod_cfg (dict, optional): Configs for style modulation module. Defaults to dict(bias_init=1.). style_bias (float, optional): Bias value for style code. Defaults to 0.. eps (float, optional): Epsilon value to avoid computation error. Defaults to 1e-8. 
""" def __init__(self, in_channels, out_channels, kernel_size, style_channels, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1], equalized_lr_cfg=dict(mode='fan_in', lr_mul=1.0, gain=1.0), style_mod_cfg=dict(bias_init=1.0), style_bias=0.0, eps=1e-08): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.style_channels = style_channels self.demodulate = demodulate assert isinstance(self.kernel_size, int) and (self.kernel_size >= 1 and self.kernel_size % 2 == 1) self.upsample = upsample self.downsample = downsample self.style_bias = style_bias self.eps = eps style_mod_cfg = dict() if style_mod_cfg is None else style_mod_cfg self.style_modulation = EqualLinearActModule(style_channels, in_channels, **style_mod_cfg) lr_mul_ = 1.0 if equalized_lr_cfg is not None: lr_mul_ = equalized_lr_cfg.get('lr_mul', 1.0) self.weight = nn.Parameter(torch.randn(1, out_channels, in_channels, kernel_size, kernel_size).div_(lr_mul_)) if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, (pad0, pad1), upsample_factor=factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) if equalized_lr_cfg is not None: equalized_lr(self, **equalized_lr_cfg) self.padding = kernel_size // 2 def forward(self, x, style): n, c, h, w = x.shape style = self.style_modulation(style).view(n, 1, c, 1, 1 ) + self.style_bias weight = self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) weight = weight * demod.view(n, self.out_channels, 1, 1, 1) weight = weight.view(n * self.out_channels, c, self.kernel_size, self.kernel_size) if self.upsample: x = x.reshape(1, n * c, h, w) weight = weight.view(n, self.out_channels, c, self.kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(n * c, self. out_channels, self.kernel_size, self.kernel_size) x = F.conv_transpose2d(x, weight, padding=0, stride=2, groups=n) x = x.reshape(n, self.out_channels, *x.shape[-2:]) x = self.blur(x) elif self.downsample: x = self.blur(x) x = x.view(1, n * self.in_channels, *x.shape[-2:]) x = F.conv2d(x, weight, stride=2, padding=0, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) else: x = x.view(1, n * c, h, w) x = F.conv2d(x, weight, stride=1, padding=self.padding, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) return x class UpsampleUpFIRDn(nn.Module): """UpFIRDn for Upsampling. This module is used in the ``to_rgb`` layers in StyleGAN2 for upsampling the images. Args: kernel (Array): Blur kernel/filter used in UpFIRDn. factor (int, optional): Upsampling factor. Defaults to 2. """ def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = _make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, H, W). Returns: Tensor: Output feature map. """ out = upfirdn2d(x, self.kernel, up=self.factor, down=1, pad=self.pad) return out class ModulatedToRGBNew(nn.Module): """To RGB layer. This module is designed to output image tensor in StyleGAN2. Args: in_channels (int): Input channels. style_channels (int): Channels for the style codes. 
out_channels (int, optional): Output channels. Defaults to 3. upsample (bool, optional): Whether to adopt upsampling in features. Defaults to False. blur_kernel (list[int], optional): Blurry kernel. Defaults to [1, 3, 3, 1]. style_mod_cfg (dict, optional): Configs for style modulation module. Defaults to dict(bias_init=1.). style_bias (float, optional): Bias value for style code. Defaults to 0.. """ def __init__(self, in_channels, style_channels, out_channels=3, upsample=True, blur_kernel=[1, 3, 3, 1], style_mod_cfg=dict( bias_init=1.0), style_bias=0.0): super().__init__() if upsample: self.upsample = UpsampleUpFIRDn(blur_kernel) self.conv = ModulatedConv2d(in_channels, out_channels=out_channels, kernel_size=1, style_channels=style_channels, demodulate=False, style_mod_cfg=style_mod_cfg, style_bias=style_bias) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_1 = self.conv.weight_orig primals_5 = self.conv.style_modulation.bias primals_3 = self.conv.style_modulation.linear.weight_orig primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
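ModulatedToRGBNew routes forward() through the compiled call() above, so it should agree numerically with the eager ModulatedToRGB from the python_code field. A hedged equivalence sketch, assuming both classes share one namespace and a CUDA device is available (call() launches its Triton kernels on cuda:0):

import torch

torch.manual_seed(0)
eager = ModulatedToRGB(in_channels=4, style_channels=4).cuda()
compiled = ModulatedToRGBNew(in_channels=4, style_channels=4).cuda()
# Both classes expose the same parameter/buffer names, so the state dict
# transfers directly.
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
style = torch.rand(4, 4, device='cuda')
with torch.no_grad():
    ref = eager(x, style)
    out = compiled(x, style)
print(torch.allclose(ref, out, atol=1e-5))  # should print True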
repo_name: jiangwenj02/mmgeneration
module_name: ModulatedToRGB
synthetic: false
uuid: 12,623
licenses: [ "Apache-2.0" ]
stars: 0
sha: da9ad377ae19260467fc332ddb88f505c38a915a
repo_link: https://github.com/jiangwenj02/mmgeneration/tree/da9ad377ae19260467fc332ddb88f505c38a915a
entry_point: ClassificationModel

original_triton_code:
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/au/caug6nesiygukdpkrndsclkfho3dygoeotjtbnihl4wlyyiuddug.py # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # out => convolution # out_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mw/cmwgs5icg2cwiannk5teep5ap5cybx7v3k4l56nfby2eg4bkymzv.py # Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # out_8 => convolution_4 # out_9 => sigmoid # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 5120 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 80 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256, ), (1, )) assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1)) 
assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (256, ), (1, )) assert_size_stride(primals_10, (80, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (80, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 16384, grid=grid(16384), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 256, 4, 4), (4096, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 16384, grid=grid(16384), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 4, 4), (4096, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf5, primals_7, 16384, grid=grid(16384), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 4, 4), (4096, 16, 4, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [out_6, out_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf7, primals_9, 16384, grid=grid(16384), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 80, 4, 4), (1280, 16, 4, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_1.run(buf9, primals_11, 5120, grid=grid(5120), stream=stream0) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), 
device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((80, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((80, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
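In the wrapper above, every convolution is launched through extern_kernels.convolution with bias=None; the per-channel bias add and the ReLU are then fused into the single in-place pointwise kernel triton_poi_fused_convolution_relu_0. A minimal eager-mode sketch of that conv-plus-epilogue pattern (illustrative only; conv_bias_relu_reference is a hypothetical helper, not part of the generated output):

import torch
import torch.nn.functional as F


def conv_bias_relu_reference(x, weight, bias):
    # extern_kernels.convolution(..., bias=None) corresponds to a bias-free conv2d
    out = F.conv2d(x, weight, bias=None, stride=1, padding=1)
    # the fused epilogue: broadcast the per-channel bias, then clamp at zero (ReLU)
    return torch.relu(out + bias.view(1, -1, 1, 1))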
import torch
import torch.nn as nn


class ClassificationModel(nn.Module):

    def __init__(self, num_features_in, num_anchors=1, num_classes=80,
        prior=0.01, feature_size=256):
        super(ClassificationModel, self).__init__()
        self.num_classes = num_classes
        self.num_anchors = num_anchors
        self.conv1 = nn.Conv2d(num_features_in, feature_size,
            kernel_size=3, padding=1)
        self.act1 = nn.ReLU()
        self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
            padding=1)
        self.act2 = nn.ReLU()
        self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
            padding=1)
        self.act3 = nn.ReLU()
        self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
            padding=1)
        self.act4 = nn.ReLU()
        self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
            kernel_size=3, padding=1)
        self.output_act = nn.Sigmoid()

    def forward(self, x):
        out = self.conv1(x)
        out = self.act1(out)
        out = self.conv2(out)
        out = self.act2(out)
        out = self.conv3(out)
        out = self.act3(out)
        out = self.conv4(out)
        out = self.act4(out)
        out = self.output(out)
        out = self.output_act(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features_in': 4}]
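get_inputs() and get_init_inputs() pin down the shapes the module was traced with; a quick smoke test of the eager module (a sketch, assuming the definitions above are in scope) confirms the (4, 80, 4, 4) sigmoid output that the compiled wrapper's final assert_size_stride checks for:

init_args, init_kwargs = get_init_inputs()
model = ClassificationModel(*init_args, **init_kwargs)
x, = get_inputs()
scores = model(x)
# 80 output channels = num_anchors (1) * num_classes (80)
assert scores.shape == (4, 80, 4, 4)
# sigmoid keeps every score in [0, 1]
assert scores.min() >= 0.0 and scores.max() <= 1.0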
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 5120
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 80
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x3, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (256,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_7, (256,), (1,))
    assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_9, (256,), (1,))
    assert_size_stride(primals_10, (80, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_11, (80,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 256, 4, 4), (4096, 16, 4, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_0[grid(16384)](buf3, primals_5,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 256, 4, 4), (4096, 16, 4, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_0[grid(16384)](buf5, primals_7,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 256, 4, 4), (4096, 16, 4, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_relu_0[grid(16384)](buf7, primals_9,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
        buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 80, 4, 4), (1280, 16, 4, 1))
        buf9 = buf8
        del buf8
        triton_poi_fused_convolution_sigmoid_1[grid(5120)](buf9,
            primals_11, 5120, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
    return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, buf1, buf3, buf5, buf7, buf9)


class ClassificationModelNew(nn.Module):

    def __init__(self, num_features_in, num_anchors=1, num_classes=80,
        prior=0.01, feature_size=256):
        super(ClassificationModelNew, self).__init__()
        self.num_classes = num_classes
        self.num_anchors = num_anchors
        self.conv1 = nn.Conv2d(num_features_in, feature_size,
            kernel_size=3, padding=1)
        self.act1 = nn.ReLU()
        self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
            padding=1)
        self.act2 = nn.ReLU()
        self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
            padding=1)
        self.act3 = nn.ReLU()
        self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
            padding=1)
        self.act4 = nn.ReLU()
        self.output = nn.Conv2d(feature_size, num_anchors * num_classes,
            kernel_size=3, padding=1)
        self.output_act = nn.Sigmoid()

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.output.weight
        primals_11 = self.output.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
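Since ClassificationModelNew exposes the same parameter names as the eager ClassificationModel, the two can be compared directly. A hedged parity-check sketch (check_parity is a hypothetical helper; it needs a CUDA device, and the tolerance may need adjusting for cuDNN nondeterminism):

import torch


def check_parity():
    torch.manual_seed(0)
    ref = ClassificationModel(num_features_in=4).cuda().eval()
    new = ClassificationModelNew(num_features_in=4).cuda().eval()
    new.load_state_dict(ref.state_dict())  # identical parameter names
    x = torch.rand(4, 4, 4, 4, device='cuda')  # the traced input shape
    with torch.no_grad():
        assert torch.allclose(ref(x), new(x), atol=1e-05)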
fmrdev/ctracker
ClassificationModel
false
12,624
[ "Apache-2.0" ]
0
6f5a88d569d0132a9f844cd1e55e60032d32bcba
https://github.com/fmrdev/ctracker/tree/6f5a88d569d0132a9f844cd1e55e60032d32bcba
GroupNorm32
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/zy/czyl6lkgkemly2rorp7dytnq5bsikpucv7yhz2var2hddv5xnkj4.py # Topologically Sorted Source Nodes: [y, mul, sigmoid, y_1], Original ATen: [aten.native_group_norm, aten.mul, aten.sigmoid] # Source node to ATen node mapping: # mul => mul_2 # sigmoid => sigmoid # y => add, add_1, mul_1, rsqrt, var_mean # y_1 => mul_3 # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4.0), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_2,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %sigmoid), kwargs = {}) triton_per_fused_mul_native_group_norm_sigmoid_0 = async_compile.triton('triton_per_fused_mul_native_group_norm_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 
1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_native_group_norm_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_native_group_norm_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 4.0 tmp29 = tmp27 * tmp28 tmp30 = tl.sigmoid(tmp29) tmp31 = tmp27 * tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(in_out_ptr1 + (r1 + (64*x0)), tmp31, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [y, mul, sigmoid, y_1], Original ATen: [aten.native_group_norm, aten.mul, aten.sigmoid] stream0 = get_raw_stream(0) triton_per_fused_mul_native_group_norm_sigmoid_0.run(buf3, buf5, primals_1, primals_2, primals_3, buf0, 4, 64, grid=grid(4), stream=stream0) return (buf5, primals_1, primals_2, primals_3, buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
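The persistent-reduction kernel above handles one (sample, group) pair per program: with num_groups=1 and a (4, 4, 4, 4) input, each group is 64 elements, so the mean, the population variance (correction=0), rsqrt(var + 1e-05), the per-channel affine, and the x * sigmoid(4 * x) swish all happen in a single pass. A minimal eager restatement of that arithmetic (groupnorm_swish_reference is an illustrative helper, not generated code):

import torch


def groupnorm_swish_reference(x, weight, bias):
    n = x.shape[0]
    g = x.reshape(n, -1)  # num_groups=1: each sample is one group of 64 elements
    mean = g.mean(dim=1, keepdim=True)
    var = g.var(dim=1, unbiased=False, keepdim=True)  # correction=0, as in the kernel
    y = ((g - mean) * torch.rsqrt(var + 1e-05)).reshape_as(x)
    y = y * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
    return y * torch.sigmoid(y * 4.0)  # the swish branch with swish == 4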
import torch
import torch.nn as nn
import torch.nn.functional as F


class GroupNorm32(nn.GroupNorm):

    def __init__(self, num_groups, num_channels, swish, eps=1e-05):
        super().__init__(num_groups=num_groups, num_channels=num_channels,
            eps=eps)
        self.swish = swish

    def forward(self, x):
        y = super().forward(x.float())
        if self.swish == 1.0:
            y = F.silu(y)
        elif self.swish:
            y = y * F.sigmoid(y * float(self.swish))
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_groups': 1, 'num_channels': 4, 'swish': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_mul_native_group_norm_sigmoid_0(in_out_ptr0,
    in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    r3 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 64.0
    tmp18 = tmp16 / tmp17
    tmp19 = 1e-05
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.rsqrt(tmp20)
    tmp22 = tmp0 - tmp10
    tmp23 = tmp22 * tmp21
    tmp25 = tmp23 * tmp24
    tmp27 = tmp25 + tmp26
    tmp28 = 4.0
    tmp29 = tmp27 * tmp28
    tmp30 = tl.sigmoid(tmp29)
    tmp31 = tmp27 * tmp30
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp21, xmask)
    tl.store(in_out_ptr1 + (r1 + 64 * x0), tmp31, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf1
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf5 = buf4
        del buf4
        get_raw_stream(0)
        triton_per_fused_mul_native_group_norm_sigmoid_0[grid(4)](buf3,
            buf5, primals_1, primals_2, primals_3, buf0, 4, 64, XBLOCK=1,
            num_warps=2, num_stages=1)
    return buf5, primals_1, primals_2, primals_3, buf0, buf3


class GroupNorm32New(nn.GroupNorm):

    def __init__(self, num_groups, num_channels, swish, eps=1e-05):
        super().__init__(num_groups=num_groups, num_channels=num_channels,
            eps=eps)
        self.swish = swish

    def forward(self, input_0):
        primals_2 = self.weight
        primals_3 = self.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
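call() returns the activation first, followed by the tensors Inductor saves for the backward pass; GroupNorm32New.forward keeps only output[0]. A sketch of invoking call() directly (hypothetical usage; the inputs must be contiguous CUDA tensors with exactly the asserted shapes):

import torch

w = torch.ones(4, device='cuda')   # GroupNorm weight
b = torch.zeros(4, device='cuda')  # GroupNorm bias
x = torch.rand(4, 4, 4, 4, device='cuda')
out = call([x, w, b])
y = out[0]                   # the (4, 4, 4, 4) activation
mean, rstd = out[4], out[5]  # per-(sample, group) statistics saved for backward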
johnpaulbin/glide-text2im
GroupNorm32
false
12,625
[ "MIT" ]
0
4897050c4c540316dfb1ec7e6ff95698bcb20487
https://github.com/johnpaulbin/glide-text2im/tree/4897050c4c540316dfb1ec7e6ff95698bcb20487
TransformerEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/oz/cozo2tvc7hyhhuvn7mvono4mqt4xjxbetoafx6siwgnsijj54xyl.py # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat] # Source node to ATen node mapping: # repeat => repeat # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%view_12, [1, 4, 1, 1]), kwargs = {}) triton_poi_fused_repeat_1 = async_compile.triton('triton_poi_fused_repeat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bx/cbxsgautsk4bd7exwaatk4zmdcujosx2bs6glhzldma6whelf3xa.py # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn_weights => exp, sum_1 # masked_fill_ => full_default, where # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_13, %full_default, %bmm), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : 
[num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_masked_fill_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 
+ tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + (x2), tmp20, xmask) tl.store(out_ptr1 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/k3/ck3ynjvzkegtm2kjok34x3fffzfegirtmypcfcgbywxdahuilxmg.py # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn_weights => div_1, exp # masked_fill_ => full_default, where # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_13, %full_default, %bmm), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_masked_fill_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex x4 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = 
tl.load(in_out_ptr0 + (x3), xmask) tmp6 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + (x3), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/s7/cs7p2dyxlesdvuyx4owztmqg5sapsarlgzaivin7okeoe6lxygw7.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_18, [1]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_native_layer_norm_5 = async_compile.triton('triton_poi_fused_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/y6/cy6mkjdwes62jaih4dzebyknvxezhquh37cme5cflrxbxff3z675.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add_1, add_2, mul, mul_1, rsqrt, sub_1 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_18, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_11), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {}) triton_poi_fused_native_layer_norm_6 = async_compile.triton('triton_poi_fused_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/u4/cu4mvhweewrefdurxuza5qfbqlwomkc67kmxkkaurh6luaf2e2fz.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_21,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_relu_threshold_backward_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/he/chevf4d6tadiz3y2a2abr2lj2bvo3wyfykoivwj2s4xedp3vdjuf.py # Topologically Sorted Source Nodes: [tensor_8], Original ATen: [aten.add] # Source node to ATen node mapping: # tensor_8 => add_3 # Graph fragment: # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %view_23), kwargs = {}) triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hn/chnyp4bqchi6cc3qkpikodtjzt7sfs4gz3r2kunqaesb7ahrywso.py # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_24, [1]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = 
tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/2i/c2it3bfvz5dki6xljhyv3o2tjme4rnp2cbavnrl4nu6kpvzqdzbp.py # Topologically Sorted Source Nodes: [tensor_10], Original ATen: [aten.mul] # Source node to ATen node mapping: # tensor_10 => mul_4 # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_25, %unsqueeze), kwargs = {}) triton_poi_fused_mul_10 = async_compile.triton('triton_poi_fused_mul_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, 
)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, ), (1, )) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 16, 4, grid=grid(16, 4), stream=stream0) del primals_3 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf3, primals_7, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_5, buf5, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_prod], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat] triton_poi_fused_repeat_1.run(primals_8, buf7, 64, grid=grid(64), stream=stream0) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0); del buf2 # reuse buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_2.run(buf7, buf6, buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_3.run(buf10, buf7, buf8, buf9, 256, grid=grid(256), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [attentioned], Original ATen: [aten.bmm] extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del 
buf8 # reuse # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_5.run(primals_1, buf13, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_6.run(primals_1, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, grid=grid(64), stream=stream0) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf16, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0); del buf17 # reuse buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_7.run(buf18, primals_14, buf24, 64, grid=grid(64), stream=stream0) del primals_14 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse # Topologically Sorted Source Nodes: [tensor_8], Original ATen: [aten.add] triton_poi_fused_add_8.run(buf20, buf16, primals_16, 64, grid=grid(64), stream=stream0) del primals_16 buf21 = buf15; del buf15 # reuse buf22 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_9.run(buf20, buf21, buf22, 16, grid=grid(16), stream=stream0) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tensor_10], Original ATen: [aten.mul] triton_poi_fused_mul_10.run(buf20, buf21, buf22, primals_17, primals_18, primals_8, buf23, 64, grid=grid(64), stream=stream0) del buf21 del buf22 del primals_18 return (buf23, primals_1, primals_8, primals_11, primals_17, buf7, buf10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(buf20, (16, 4), (4, 1), 0), primals_15, buf24, primals_13, primals_9, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
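The pair of _softmax_masked_fill kernels above implements a numerically stable masked softmax in two passes: kernel 2 computes the per-row max and exponential sum, kernel 3 normalizes in place. A minimal eager-mode sketch of the fused computation, written only for illustration (the function name is ours, not Inductor's; the 1/scale factor is folded into the kernels' 1.0 constant because dim_per_head is 1 for these shapes):

import torch

def masked_softmax(dot_prod: torch.Tensor, attn_mask: torch.Tensor) -> torch.Tensor:
    # Masked positions get a large negative constant, matching the kernels' -1e20.
    scores = dot_prod.masked_fill(attn_mask, -1e20)
    # Subtract the row max before exponentiating for numerical stability.
    scores = scores - scores.max(dim=-1, keepdim=True).values
    exp = scores.exp()
    return exp / exp.sum(dim=-1, keepdim=True)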
import math import torch from torch import nn import torch.nn.functional as F def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, dropout=0): super(TransformerFFN, self).__init__() self.dropout = nn.Dropout(p=dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.dropout(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerEncoderLayer(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, dropout= relu_dropout) self.norm2 = nn.LayerNorm(embedding_size) def forward(self, tensor, mask): tensor = tensor + self.attention(tensor, mask=mask) tensor = _normalize(tensor, self.norm1) tensor = tensor + self.ffn(tensor) tensor = _normalize(tensor, self.norm2) tensor *= mask.unsqueeze(-1).float() return tensor def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_heads': 4, 'embedding_size': 4, 'ffn_size': 4}]
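A minimal smoke test for the eager layer defined above, reusing the shapes from get_init_inputs and get_inputs (we pass an all-ones mask so no position is masked out; that choice is ours for the example, not part of the original module):

import torch

layer = TransformerEncoderLayer(n_heads=4, embedding_size=4, ffn_size=4)
tensor, mask = torch.rand(4, 4, 4), torch.ones(4, 4)
out = layer(tensor, mask)  # attention -> norm -> FFN -> norm -> mask
assert out.shape == (4, 4, 4)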
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex x4 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, 
XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_mul_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) 
assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf3, primals_7, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_5, buf5, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) triton_poi_fused_repeat_1[grid(64)](primals_8, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0) del buf2 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(64)](buf7, buf6, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf6 del buf6 triton_poi_fused__softmax_masked_fill_3[grid(256)](buf10, buf7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0) del buf9 extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) triton_poi_fused_native_layer_norm_5[grid(16)](primals_1, buf13, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_6[grid(64)](primals_1, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf16, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0) del buf17 buf24 = empty_strided_cuda((4, 4, 4), 
(16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(64)](buf18, primals_14, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf21 = buf15 del buf15 buf22 = buf14 del buf14 triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_10[grid(64)](buf20, buf21, buf22, primals_17, primals_18, primals_8, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_18 return (buf23, primals_1, primals_8, primals_11, primals_17, buf7, buf10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor( buf20, (16, 4), (4, 1), 0), primals_15, buf24, primals_13, primals_9, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)) def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, dropout=0): super(TransformerFFN, self).__init__() self.dropout = nn.Dropout(p=dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) 
nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.dropout(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerEncoderLayerNew(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, dropout= relu_dropout) self.norm2 = nn.LayerNorm(embedding_size) def forward(self, input_0, input_1): primals_2 = self.attention.q_lin.weight primals_3 = self.attention.q_lin.bias primals_4 = self.attention.k_lin.weight primals_5 = self.attention.k_lin.bias primals_6 = self.attention.v_lin.weight primals_7 = self.attention.v_lin.bias primals_9 = self.attention.out_lin.weight primals_10 = self.attention.out_lin.bias primals_11 = self.norm1.weight primals_12 = self.norm1.bias primals_13 = self.ffn.lin1.weight primals_14 = self.ffn.lin1.bias primals_15 = self.ffn.lin2.weight primals_16 = self.ffn.lin2.bias primals_17 = self.norm2.weight primals_18 = self.norm2.bias primals_1 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
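Assuming both the eager TransformerEncoderLayer from the reference source and the wrapper above are in scope, a hedged equivalence check on a CUDA device could look as follows (the tolerances are a float32 guess, not a guarantee):

import torch

eager = TransformerEncoderLayer(4, 4, 4).cuda()
compiled = TransformerEncoderLayerNew(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameters
x = torch.rand(4, 4, 4, device='cuda')
mask = torch.ones(4, 4, device='cuda')
torch.testing.assert_close(compiled(x, mask), eager(x, mask), rtol=1e-4, atol=1e-4)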
jinjiren/ParlAI
TransformerEncoderLayer
false
12626
[ "MIT" ]
0
40799aeee69f2a0bb25a1341bb8da0c44861268e
https://github.com/jinjiren/ParlAI/tree/40799aeee69f2a0bb25a1341bb8da0c44861268e
ModulatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3i/c3it2w2r3z7gdnrl7rpdpqiiot4zor7mekrndtvp2kybb36uwsh5.py # Topologically Sorted Source Nodes: [mul, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # demod => rsqrt # mul => mul # pow_1 => pow_1 # sum_1 => sum_1 # weight => mul_1 # weight_1 => mul_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 0.125), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %view_1), kwargs = {}) triton_per_fused_add_mul_pow_rsqrt_sum_0 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_0', 
'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = (rindex // 16) x1 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (r5 + (64*x4)), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] stream0 = get_raw_stream(0) triton_per_fused_add_mul_pow_rsqrt_sum_0.run(buf1, primals_3, primals_2, buf2, 16, 64, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf3, (1, 16, 5, 5), (400, 25, 5, 1)) return (reinterpret_tensor(buf3, (4, 4, 5, 5), (100, 25, 5, 1), 0), primals_2, primals_3, buf1, reinterpret_tensor(buf2, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, 
times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
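The single persistent-reduction kernel fuses modulation and demodulation: it scales the weight by the baked-in constant 0.125 (that is, 1/sqrt(fan_in) with fan_in = 4 * 4**2 = 64), multiplies by the per-sample style, reduces the squared weights over the input-channel and kernel dimensions, and rescales by the resulting rsqrt. An eager-mode sketch of the same math (the function name is ours):

import torch

def modulate_demodulate(weight, style, scale=0.125, eps=1e-08):
    # weight: (1, out_c, in_c, k, k); style: (batch, in_c)
    w = scale * weight * style.view(style.size(0), 1, -1, 1, 1)
    demod = torch.rsqrt(w.pow(2).sum(dim=(2, 3, 4)) + eps)  # (batch, out_c)
    return w * demod.view(w.size(0), w.size(1), 1, 1, 1)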
from torch.autograd import Function import math import torch from torch import nn import torch.nn.functional as F def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel 
= out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = nn.Identity() self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
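Note that upfirdn2d_op in the source above refers to the compiled CUDA extension from the StyleGAN2 codebase (normally created with torch.utils.cpp_extension.load); its definition was lost in extraction, so the upsample/downsample branches would raise NameError as written. The default branch is self-contained, so a minimal smoke test matching get_inputs is possible:

import torch

conv = ModulatedConv2d(in_channel=4, out_channel=4, kernel_size=4, style_dim=4)
x, style = torch.rand(4, 4, 4, 4), torch.rand(4, 4)
out = conv(x, style)
# padding = kernel_size // 2 = 2, so an even kernel grows H and W by one.
assert out.shape == (4, 4, 5, 5)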
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_mul_pow_rsqrt_sum_0[grid(16)](buf1, primals_3, primals_2, buf2, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf3, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf3, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), primals_2, primals_3, buf1, reinterpret_tensor(buf2, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, 
pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = nn.Identity() self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input_0, input_1): primals_3 = self.weight primals_1 = input_0 
primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
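The wrapper realizes the per-sample filters with one grouped convolution: the batch is folded into the channel dimension and groups=batch gives each sample its own filter bank, which is exactly what call hands to extern_kernels.convolution. A hedged eager equivalent of the default branch (names are ours):

import torch
import torch.nn.functional as F

def grouped_modconv(input, weight):
    # input: (B, in_c, H, W); weight: (B, out_c, in_c, k, k), already demodulated
    B, in_c, H, W = input.shape
    out_c, k = weight.shape[1], weight.shape[-1]
    out = F.conv2d(input.view(1, B * in_c, H, W),
                   weight.view(B * out_c, in_c, k, k),
                   padding=k // 2, groups=B)
    return out.view(B, out_c, out.shape[-2], out.shape[-1])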
johnberg1/psp_s
ModulatedConv2d
false
12627
[ "Apache-2.0", "BSD-2-Clause", "MIT" ]
0
717f4c448a4e7537cf4b74067d454c7644609ca3
https://github.com/johnberg1/psp_s/tree/717f4c448a4e7537cf4b74067d454c7644609ca3
GaussianKLLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/x4/cx4e6r3wtovetgfit3c47oplgue4lpimhztju62fldej6ezu4zln.py # Topologically Sorted Source Nodes: [sub_1, exp, sub, pow_1, numerator, exp_1, fraction, add_1, sub_2, sum_1], Original ATen: [aten.sub, aten.exp, aten.pow, aten.add, aten.div, aten.sum] # Source node to ATen node mapping: # add_1 => add_1 # exp => exp # exp_1 => exp_1 # fraction => div # numerator => add # pow_1 => pow_1 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # sum_1 => sum_1 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg0_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg2_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, %pow_1), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg3_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %exp_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %div), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, 1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_2, [1]), kwargs = {}) triton_poi_fused_add_div_exp_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_add_div_exp_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_exp_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp4 = tl.load(in_ptr2 + (x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr3 + (x0 + (64*x1)), xmask) tmp14 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp15 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp18 = tl.load(in_ptr2 + (16 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr3 + (16 + x0 + (64*x1)), xmask) tmp28 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp29 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp32 = tl.load(in_ptr2 + (32 + x0 + (64*x1)), xmask) tmp33 = tl.load(in_ptr3 + (32 + x0 + (64*x1)), xmask) tmp42 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp43 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp46 = tl.load(in_ptr2 + (48 + x0 + (64*x1)), xmask) tmp47 = tl.load(in_ptr3 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.exp(tmp1) tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp9 = tl_math.exp(tmp0) tmp10 = tmp8 / tmp9 tmp11 = tmp2 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp16 = tmp14 - tmp15 tmp17 = tl_math.exp(tmp15) tmp20 = tmp18 - tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp17 + tmp21 tmp23 = tl_math.exp(tmp14) tmp24 = tmp22 / tmp23 tmp25 = tmp16 + tmp24 tmp26 = tmp25 - tmp12 tmp27 = tmp13 + tmp26 tmp30 = tmp28 - tmp29 tmp31 = tl_math.exp(tmp29) tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp34 tmp36 = tmp31 + tmp35 tmp37 = tl_math.exp(tmp28) tmp38 = tmp36 / tmp37 tmp39 = tmp30 + tmp38 tmp40 = tmp39 - tmp12 tmp41 = tmp27 + tmp40 tmp44 = tmp42 - tmp43 tmp45 = tl_math.exp(tmp43) tmp48 = tmp46 - tmp47 tmp49 = tmp48 * tmp48 tmp50 = tmp45 + tmp49 tmp51 = tl_math.exp(tmp42) tmp52 = tmp50 / tmp51 tmp53 = tmp44 + tmp52 tmp54 = tmp53 - tmp12 tmp55 = tmp41 + tmp54 tl.store(out_ptr0 + (x2), tmp55, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/wz/cwzv7qhl75i7mhmemwgafhj7m6up6wzmdfskrn6ddvszpz3eqngo.py # Topologically Sorted Source Nodes: [kl, mean], Original ATen: [aten.mul, aten.mean] # Source node to ATen node mapping: # kl => mul # mean => mean # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.5), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [0]), kwargs = {}) triton_poi_fused_mean_mul_1 = 
async_compile.triton('triton_poi_fused_mean_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr0 + (16 + x0), xmask) tmp6 = tl.load(in_ptr0 + (32 + x0), xmask) tmp9 = tl.load(in_ptr0 + (48 + x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1, exp, sub, pow_1, numerator, exp_1, fraction, add_1, sub_2, sum_1], Original ATen: [aten.sub, aten.exp, aten.pow, aten.add, aten.div, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_add_div_exp_pow_sub_sum_0.run(arg3_1, arg0_1, arg1_1, arg2_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [kl, mean], Original ATen: [aten.mul, aten.mean] triton_poi_fused_mean_mul_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), 
device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GaussianKLLoss(nn.Module): def __init__(self): super(GaussianKLLoss, self).__init__() def forward(self, mu1, logvar1, mu2, logvar2): numerator = logvar1.exp() + torch.pow(mu1 - mu2, 2) fraction = torch.div(numerator, logvar2.exp()) kl = 0.5 * torch.sum(logvar2 - logvar1 + fraction - 1, dim=1) return kl.mean(dim=0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
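An illustrative cross-check (not part of the extracted row): the closed form in forward above is the analytic KL divergence between diagonal Gaussians parameterized by mean and log-variance, so it should agree with torch.distributions; GaussianKLLoss is assumed to be in scope from the snippet above.

import torch
from torch.distributions import Normal, kl_divergence

mu1, logvar1, mu2, logvar2 = (torch.rand(4, 4, 4, 4) for _ in range(4))
loss = GaussianKLLoss()(mu1, logvar1, mu2, logvar2)

# Normal takes a standard deviation, hence exp(0.5 * logvar); the module
# sums the per-dimension KL over dim=1 and averages over the batch dim.
p = Normal(mu1, (0.5 * logvar1).exp())
q = Normal(mu2, (0.5 * logvar2).exp())
ref = kl_divergence(p, q).sum(dim=1).mean(dim=0)
torch.testing.assert_close(loss, ref)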
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_exp_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr3 + (x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr3 + (16 + x0 + 64 * x1), xmask) tmp28 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp29 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp33 = tl.load(in_ptr3 + (32 + x0 + 64 * x1), xmask) tmp42 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp43 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp46 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask) tmp47 = tl.load(in_ptr3 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.exp(tmp1) tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp9 = tl_math.exp(tmp0) tmp10 = tmp8 / tmp9 tmp11 = tmp2 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp16 = tmp14 - tmp15 tmp17 = tl_math.exp(tmp15) tmp20 = tmp18 - tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp17 + tmp21 tmp23 = tl_math.exp(tmp14) tmp24 = tmp22 / tmp23 tmp25 = tmp16 + tmp24 tmp26 = tmp25 - tmp12 tmp27 = tmp13 + tmp26 tmp30 = tmp28 - tmp29 tmp31 = tl_math.exp(tmp29) tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp34 tmp36 = tmp31 + tmp35 tmp37 = tl_math.exp(tmp28) tmp38 = tmp36 / tmp37 tmp39 = tmp30 + tmp38 tmp40 = tmp39 - tmp12 tmp41 = tmp27 + tmp40 tmp44 = tmp42 - tmp43 tmp45 = tl_math.exp(tmp43) tmp48 = tmp46 - tmp47 tmp49 = tmp48 * tmp48 tmp50 = tmp45 + tmp49 tmp51 = tl_math.exp(tmp42) tmp52 = tmp50 / tmp51 tmp53 = tmp44 + tmp52 tmp54 = tmp53 - tmp12 tmp55 = tmp41 + tmp54 tl.store(out_ptr0 + x2, tmp55, xmask) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + (16 + x0), xmask) tmp6 = tl.load(in_ptr0 + (32 + x0), xmask) tmp9 = tl.load(in_ptr0 + (48 + x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_add_div_exp_pow_sub_sum_0[grid(64)](arg3_1, arg0_1, arg1_1, arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mean_mul_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 return buf1, class GaussianKLLossNew(nn.Module): def __init__(self): super(GaussianKLLossNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
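A small sketch (illustrative only) of the indexing the fused kernel above relies on: for a contiguous (4, 4, 4, 4) tensor, the four dim=1 slices of one batch element sit at flat offsets 0, 16, 32, 48 within its 64-element block, which is why the kernel loads x0 + 64 * x1 plus those constants to unroll the sum over dim=1.

import torch

t = torch.arange(256, dtype=torch.float32).reshape(4, 4, 4, 4)
flat = t.reshape(4, 64)  # one 64-element block per batch element
manual = sum(flat[:, 16 * k:16 * (k + 1)] for k in range(4)).reshape(4, 4, 4)
torch.testing.assert_close(manual, t.sum(dim=1))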
johnson7788/Info-HCVAE
GaussianKLLoss
false
12628
[ "Apache-2.0" ]
0
f43bf705aab3dcdc340ded3be09fb87420a48c51
https://github.com/johnson7788/Info-HCVAE/tree/f43bf705aab3dcdc340ded3be09fb87420a48c51
CategoricalKLLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/kz/ckzbjorew6d2qnczvglsrdyz6betqwacszox7jxg7ucfo22ke4tw.py # Topologically Sorted Source Nodes: [log_P, log_Q, sub, mul, sum_1, kl], Original ATen: [aten.log, aten.sub, aten.mul, aten.sum] # Source node to ATen node mapping: # kl => sum_2 # log_P => log # log_Q => log_1 # mul => mul # sub => sub # sum_1 => sum_1 # Graph fragment: # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sub), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [-1]), kwargs = {}) triton_poi_fused_log_mul_sub_sum_0 = async_compile.triton('triton_poi_fused_log_mul_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_log_mul_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 32, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_log_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (16*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr1 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp42 = tl.load(in_ptr1 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp47 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp49 = tl.load(in_ptr1 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp55 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr1 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp61 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp63 = tl.load(in_ptr1 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp68 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp70 = tl.load(in_ptr1 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp75 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp77 = tl.load(in_ptr1 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp83 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp85 = tl.load(in_ptr1 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp89 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp91 = tl.load(in_ptr1 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp96 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp98 = tl.load(in_ptr1 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp103 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp105 = tl.load(in_ptr1 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tmp0 * tmp4 tmp7 = tl_math.log(tmp6) tmp9 = tl_math.log(tmp8) tmp10 = tmp7 - tmp9 tmp11 = tmp6 * tmp10 tmp12 = tmp5 + tmp11 tmp14 = tl_math.log(tmp13) tmp16 = tl_math.log(tmp15) tmp17 = tmp14 - tmp16 tmp18 = tmp13 * tmp17 tmp19 = tmp12 + tmp18 tmp21 = tl_math.log(tmp20) tmp23 = tl_math.log(tmp22) tmp24 = tmp21 - tmp23 tmp25 = tmp20 * tmp24 tmp26 = tmp19 + tmp25 tmp28 = tl_math.log(tmp27) tmp30 = tl_math.log(tmp29) tmp31 = tmp28 - tmp30 tmp32 = tmp27 * tmp31 tmp34 = 
tl_math.log(tmp33) tmp36 = tl_math.log(tmp35) tmp37 = tmp34 - tmp36 tmp38 = tmp33 * tmp37 tmp39 = tmp32 + tmp38 tmp41 = tl_math.log(tmp40) tmp43 = tl_math.log(tmp42) tmp44 = tmp41 - tmp43 tmp45 = tmp40 * tmp44 tmp46 = tmp39 + tmp45 tmp48 = tl_math.log(tmp47) tmp50 = tl_math.log(tmp49) tmp51 = tmp48 - tmp50 tmp52 = tmp47 * tmp51 tmp53 = tmp46 + tmp52 tmp54 = tmp26 + tmp53 tmp56 = tl_math.log(tmp55) tmp58 = tl_math.log(tmp57) tmp59 = tmp56 - tmp58 tmp60 = tmp55 * tmp59 tmp62 = tl_math.log(tmp61) tmp64 = tl_math.log(tmp63) tmp65 = tmp62 - tmp64 tmp66 = tmp61 * tmp65 tmp67 = tmp60 + tmp66 tmp69 = tl_math.log(tmp68) tmp71 = tl_math.log(tmp70) tmp72 = tmp69 - tmp71 tmp73 = tmp68 * tmp72 tmp74 = tmp67 + tmp73 tmp76 = tl_math.log(tmp75) tmp78 = tl_math.log(tmp77) tmp79 = tmp76 - tmp78 tmp80 = tmp75 * tmp79 tmp81 = tmp74 + tmp80 tmp82 = tmp54 + tmp81 tmp84 = tl_math.log(tmp83) tmp86 = tl_math.log(tmp85) tmp87 = tmp84 - tmp86 tmp88 = tmp83 * tmp87 tmp90 = tl_math.log(tmp89) tmp92 = tl_math.log(tmp91) tmp93 = tmp90 - tmp92 tmp94 = tmp89 * tmp93 tmp95 = tmp88 + tmp94 tmp97 = tl_math.log(tmp96) tmp99 = tl_math.log(tmp98) tmp100 = tmp97 - tmp99 tmp101 = tmp96 * tmp100 tmp102 = tmp95 + tmp101 tmp104 = tl_math.log(tmp103) tmp106 = tl_math.log(tmp105) tmp107 = tmp104 - tmp106 tmp108 = tmp103 * tmp107 tmp109 = tmp102 + tmp108 tmp110 = tmp82 + tmp109 tl.store(out_ptr0 + (x0), tmp110, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5i/c5ihqrojuny4xsnxeyq3omcrs6nck43hbetaz5t5kzhjm6ln2y6j.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sum_2, [0]), kwargs = {}) triton_poi_fused_mean_1 = async_compile.triton('triton_poi_fused_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), 
xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_P, log_Q, sub, mul, sum_1, kl], Original ATen: [aten.log, aten.sub, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_log_mul_sub_sum_0.run(arg0_1, arg1_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] triton_poi_fused_mean_1.run(buf0, buf1, 4, grid=grid(4), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class CategoricalKLLoss(nn.Module): def __init__(self): super(CategoricalKLLoss, self).__init__() def forward(self, P, Q): log_P = P.log() log_Q = Q.log() kl = (P * (log_P - log_Q)).sum(dim=-1).sum(dim=-1) return kl.mean(dim=0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
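An illustrative equivalence (not part of the row): the P * (log P - log Q) form above matches F.kl_div, which expects log-probabilities as input and probabilities as target. The softmax normalization here is only to make P and Q proper distributions; the identity itself holds elementwise.

import torch
import torch.nn.functional as F

P = torch.rand(4, 4, 4, 4).softmax(dim=-1)
Q = torch.rand(4, 4, 4, 4).softmax(dim=-1)
loss = CategoricalKLLoss()(P, Q)

# reduction='none' keeps the elementwise P * (log P - log Q) terms, which are
# then reduced exactly as in forward: sum over the last two dims, batch mean.
ref = F.kl_div(Q.log(), P, reduction='none').sum(dim=-1).sum(dim=-1).mean(dim=0)
torch.testing.assert_close(loss, ref)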
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_log_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + 16 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr1 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (2 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + (3 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr1 + (4 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp33 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp35 = tl.load(in_ptr1 + (5 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp40 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr1 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp47 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp49 = tl.load(in_ptr1 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp55 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp57 = tl.load(in_ptr1 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp61 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp63 = tl.load(in_ptr1 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp68 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp70 = tl.load(in_ptr1 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp75 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp77 = tl.load(in_ptr1 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp83 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp85 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp89 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp91 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp96 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp98 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp103 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp105 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tmp0 * tmp4 tmp7 = tl_math.log(tmp6) tmp9 = tl_math.log(tmp8) tmp10 = tmp7 - tmp9 tmp11 = tmp6 * tmp10 tmp12 = tmp5 + tmp11 tmp14 = tl_math.log(tmp13) tmp16 = tl_math.log(tmp15) tmp17 = tmp14 - tmp16 tmp18 = tmp13 * tmp17 tmp19 = tmp12 + tmp18 tmp21 = tl_math.log(tmp20) tmp23 = tl_math.log(tmp22) tmp24 = tmp21 - tmp23 
tmp25 = tmp20 * tmp24 tmp26 = tmp19 + tmp25 tmp28 = tl_math.log(tmp27) tmp30 = tl_math.log(tmp29) tmp31 = tmp28 - tmp30 tmp32 = tmp27 * tmp31 tmp34 = tl_math.log(tmp33) tmp36 = tl_math.log(tmp35) tmp37 = tmp34 - tmp36 tmp38 = tmp33 * tmp37 tmp39 = tmp32 + tmp38 tmp41 = tl_math.log(tmp40) tmp43 = tl_math.log(tmp42) tmp44 = tmp41 - tmp43 tmp45 = tmp40 * tmp44 tmp46 = tmp39 + tmp45 tmp48 = tl_math.log(tmp47) tmp50 = tl_math.log(tmp49) tmp51 = tmp48 - tmp50 tmp52 = tmp47 * tmp51 tmp53 = tmp46 + tmp52 tmp54 = tmp26 + tmp53 tmp56 = tl_math.log(tmp55) tmp58 = tl_math.log(tmp57) tmp59 = tmp56 - tmp58 tmp60 = tmp55 * tmp59 tmp62 = tl_math.log(tmp61) tmp64 = tl_math.log(tmp63) tmp65 = tmp62 - tmp64 tmp66 = tmp61 * tmp65 tmp67 = tmp60 + tmp66 tmp69 = tl_math.log(tmp68) tmp71 = tl_math.log(tmp70) tmp72 = tmp69 - tmp71 tmp73 = tmp68 * tmp72 tmp74 = tmp67 + tmp73 tmp76 = tl_math.log(tmp75) tmp78 = tl_math.log(tmp77) tmp79 = tmp76 - tmp78 tmp80 = tmp75 * tmp79 tmp81 = tmp74 + tmp80 tmp82 = tmp54 + tmp81 tmp84 = tl_math.log(tmp83) tmp86 = tl_math.log(tmp85) tmp87 = tmp84 - tmp86 tmp88 = tmp83 * tmp87 tmp90 = tl_math.log(tmp89) tmp92 = tl_math.log(tmp91) tmp93 = tmp90 - tmp92 tmp94 = tmp89 * tmp93 tmp95 = tmp88 + tmp94 tmp97 = tl_math.log(tmp96) tmp99 = tl_math.log(tmp98) tmp100 = tmp97 - tmp99 tmp101 = tmp96 * tmp100 tmp102 = tmp95 + tmp101 tmp104 = tl_math.log(tmp103) tmp106 = tl_math.log(tmp105) tmp107 = tmp104 - tmp106 tmp108 = tmp103 * tmp107 tmp109 = tmp102 + tmp108 tmp110 = tmp82 + tmp109 tl.store(out_ptr0 + x0, tmp110, xmask) @triton.jit def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_log_mul_sub_sum_0[grid(16)](arg0_1, arg1_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mean_1[grid(4)](buf0, buf1, 4, XBLOCK=4, num_warps =1, num_stages=1) del buf0 return buf1, class CategoricalKLLossNew(nn.Module): def __init__(self): super(CategoricalKLLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
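A hedged sketch of how one might sanity-check the Inductor dump above against the eager module; it assumes a CUDA device and that both CategoricalKLLoss and CategoricalKLLossNew from this row are in scope.

import torch

P = torch.rand(4, 4, 4, 4, device='cuda')
Q = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(CategoricalKLLoss()(P, Q),
                           CategoricalKLLossNew()(P, Q))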
johnson7788/Info-HCVAE
CategoricalKLLoss
false
12629
[ "Apache-2.0" ]
0
f43bf705aab3dcdc340ded3be09fb87420a48c51
https://github.com/johnson7788/Info-HCVAE/tree/f43bf705aab3dcdc340ded3be09fb87420a48c51
Feedback
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/y6/cy6g24sdqsjaqjfbvlsjwxjxnicovho2svumdpyu3a6q2ahveris.py # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] # Source node to ATen node mapping: # leaky_relu => gt, mul, view_3, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) # %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_23, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_view_0 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + (x4), tmp7, xmask) tl.store(out_ptr0 + (x4), tmp7, xmask) tl.store(out_ptr1 + (x4), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view] # Source node to ATen node mapping: # linear_1 => view_7 # Graph fragment: # %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {}) triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gk/cgkxbks6h4eqrscppxnaorv4esw33shmgdrwqxjp2hxbln2y3baz.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] # Source node to ATen node mapping: # h => gt_1, mul_1, view_10, where_1 # Graph fragment: # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_8, 0), kwargs = {}) # 
%mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, 0.2), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_8, %mul_1), kwargs = {}) # %view_10 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_9, [4, 4, 4, 4]), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_15, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_view_2 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_view_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + (x4), tmp7, xmask) tl.store(out_ptr1 + (x4), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_leaky_relu_backward_view_0.run(buf1, primals_2, buf2, buf8, 256, grid=grid(256), stream=stream0) del primals_2 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view] triton_poi_fused_view_1.run(buf1, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_view_2.run(buf5, primals_5, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del primals_5 return (buf6, buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
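A side note on triton_poi_fused_view_1 in the dump above: for x1 in [0, 64) the two extra index terms are identically zero, so the load address reduces to x0 + 4 * x1 = x2 and the kernel is a plain contiguous copy that materializes the reshaped activation. A two-line check of that arithmetic:

for x1 in range(64):
    assert (x1 % 4) // 4 == 0 and (4 * (x1 // 4 % 4) + x1 % 4) // 16 == 0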
from _paritybench_helpers import _mock_config import torch import torch.nn as nn def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class Feedback(nn.Module): def __init__(self, opt): super(Feedback, self).__init__() self.fc1 = nn.Linear(opt.ngh, opt.ngh) self.fc2 = nn.Linear(opt.ngh, opt.ngh) self.lrelu = nn.LeakyReLU(0.2, True) self.apply(weights_init) def forward(self, x): self.x1 = self.lrelu(self.fc1(x)) h = self.lrelu(self.fc2(self.x1)) return h def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(ngh=4)}]
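Minimal usage sketch (illustrative): types.SimpleNamespace stands in for the _paritybench_helpers mock config here, since Feedback only reads opt.ngh.

import torch
from types import SimpleNamespace

net = Feedback(SimpleNamespace(ngh=4))
h = net(torch.rand(4, 4, 4, 4))
print(h.shape)  # torch.Size([4, 4, 4, 4]): two Linear + LeakyReLU(0.2) stages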
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x4, tmp7, xmask) tl.store(out_ptr0 + x4, tmp7, xmask) tl.store(out_ptr1 + x4, tmp8, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x4, tmp7, xmask) tl.store(out_ptr1 + x4, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_leaky_relu_leaky_relu_backward_view_0[grid(256)](buf1, primals_2, buf2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 
4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_view_2[grid(256)](buf5, primals_5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del primals_5 return buf6, buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf3, buf7, primals_4, buf8 def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class FeedbackNew(nn.Module): def __init__(self, opt): super(FeedbackNew, self).__init__() self.fc1 = nn.Linear(opt.ngh, opt.ngh) self.fc2 = nn.Linear(opt.ngh, opt.ngh) self.lrelu = nn.LeakyReLU(0.2, True) self.apply(weights_init) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
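A note on the bool buffers (buf7, buf8) the fused kernels store: they record where the LeakyReLU output is positive, which is all the backward pass needs to choose between gradient 1 and slope 0.2. Since the slope is positive, the output mask equals the input mask:

import torch
import torch.nn.functional as F

x = torch.randn(8)
mask = F.leaky_relu(x, 0.2) > 0  # what out_ptr1 holds (tmp8 = tmp7 > 0)
assert torch.equal(mask, x > 0)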
IacoSimoncini/tfvaegan
Feedback
false
12630
[ "MIT" ]
0
157b526d65d0b0d5412f4be6fed02fc7d6325827
https://github.com/IacoSimoncini/tfvaegan/tree/157b526d65d0b0d5412f4be6fed02fc7d6325827
ToRGB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/vm/cvmd2my75pirzl3upmpcp4yocpoihvichzmsknlesyku5iby4fr2.py # Topologically Sorted Source Nodes: [mul, weight], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # weight => mul_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = (xindex // 12) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ai/caibhmblp5htnyf6mjkfxs57zeinkinqel7wxex5e5wmlzgcsmxm.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] # Source node to ATen node mapping: # out_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_4), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_4, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, weight], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_3, primals_2, buf0, 48, grid=grid(48), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf0, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) 
assert_size_stride(buf1, (1, 12, 4, 4), (192, 16, 4, 1)) buf2 = reinterpret_tensor(buf1, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf2, primals_4, 192, grid=grid(192), stream=stream0) del primals_4 return (buf2, primals_2, reinterpret_tensor(buf0, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 3, 4, 1, 1), (12, 4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
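A sketch (illustrative only) of the grouped-convolution trick the dump's extern convolution uses: ModulatedConv2d applies one weight per sample in a single conv2d call by folding the batch of 4 into the channel dimension and setting groups=4.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
w = torch.rand(4, 3, 4, 1, 1)  # one (3, 4, 1, 1) kernel per sample
out = F.conv2d(x.view(1, 16, 4, 4), w.view(12, 4, 1, 1), groups=4)
out = out.view(4, 3, 4, 4)
ref = torch.stack([F.conv2d(x[i:i + 1], w[i]) for i in range(4)]).squeeze(1)
torch.testing.assert_close(out, ref)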
from torch.autograd import Function import math import torch from torch import nn import torch.nn.functional as F def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel 
= out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = nn.Identity() self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGB(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input, style, skip=None): out = self.conv(input, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
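Minimal usage sketch for the skip=None path, which is the only path the compiled dump covers; the upsample/skip branch goes through upfirdn2d, whose upfirdn2d_op backend is a compiled CUDA extension that this extracted snippet does not define.

import torch

to_rgb = ToRGB(in_channel=4, style_dim=4)
out = to_rgb(torch.rand(4, 4, 4, 4), torch.rand(4, 4))
print(out.shape)  # torch.Size([4, 3, 4, 4]): modulated 1x1 conv to RGB + bias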
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_4, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(48)](primals_3, primals_2, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf0, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf1, (1, 12, 4, 4), (192, 16, 4, 1)) buf2 = reinterpret_tensor(buf1, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf1 triton_poi_fused_add_1[grid(192)](buf2, primals_4, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 return buf2, primals_2, reinterpret_tensor(buf0, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = nn.Identity() self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGBNew(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_4 = self.bias primals_3 = self.conv.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
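A sketch of how the compiled wrapper could be sanity-checked against the eager module; it assumes a CUDA device and that both classes above are in scope. Note that upfirdn2d_op, referenced by the UpFirDn2d autograd functions, is an external compiled extension that this file never imports; the check below stays on the skip=None path, which never reaches it.

import torch

torch.manual_seed(0)
eager = ToRGB(in_channel=4, style_dim=4).cuda()
fused = ToRGBNew(in_channel=4, style_dim=4).cuda()
fused.load_state_dict(eager.state_dict())     # identical parameter names

x = torch.rand(4, 4, 4, 4, device='cuda')
style = torch.rand(4, 4, device='cuda')
with torch.no_grad():
    ref = eager(x, style)                     # skip=None, so no upfirdn2d call
    out = fused(x, style)
print(torch.allclose(ref, out, atol=1e-05))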
johnberg1/psp_s
ToRGB
false
12,631
[ "Apache-2.0", "BSD-2-Clause", "MIT" ]
0
717f4c448a4e7537cf4b74067d454c7644609ca3
https://github.com/johnberg1/psp_s/tree/717f4c448a4e7537cf4b74067d454c7644609ca3
SirenLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ej/cejzhnnynxtkiot2qt7feea4bkwhxo5g2qmtwe2jbyvjefkkzt6m.py # Topologically Sorted Source Nodes: [mul, sin], Original ATen: [aten.mul, aten.sin] # Source node to ATen node mapping: # mul => mul # sin => sin # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 30), kwargs = {}) # %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul,), kwargs = {}) triton_poi_fused_mul_sin_0 = async_compile.triton('triton_poi_fused_mul_sin_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 30.0 tmp2 = tmp0 * tmp1 tmp3 = tl_math.sin(tmp2) tl.store(out_ptr0 + 
(x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sin], Original ATen: [aten.mul, aten.sin] stream0 = get_raw_stream(0) triton_poi_fused_mul_sin_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from torch import nn class SirenLayer(nn.Module): def __init__(self, in_f, out_f, w0=30, is_first=False, is_last=False): super().__init__() self.in_f = in_f self.w0 = w0 self.linear = nn.Linear(in_f, out_f) self.is_first = is_first self.is_last = is_last self.init_weights() def init_weights(self): b = 1 / self.in_f if self.is_first else np.sqrt(6 / self.in_f ) / self.w0 with torch.no_grad(): self.linear.weight.uniform_(-b, b) def forward(self, x, **kwargs): x = self.linear(x) return x if self.is_last else torch.sin(self.w0 * x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_f': 4, 'out_f': 4}]
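Two conventions worth checking in SirenLayer above: hidden layers are initialized uniformly in ±sqrt(6/in_f)/w0 (first layers in ±1/in_f), and every non-final layer applies sin(w0·x), which is exactly what the fused mul+sin kernel computes. A minimal check, assuming the class is in scope:

import numpy as np
import torch

layer = SirenLayer(in_f=4, out_f=4)           # is_first=False, is_last=False
bound = np.sqrt(6 / 4) / 30
assert layer.linear.weight.abs().max() <= bound

x = torch.rand(2, 4)
print(torch.allclose(layer(x), torch.sin(30 * layer.linear(x))))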
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 30.0 tmp2 = tmp0 * tmp1 tmp3 = tl_math.sin(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sin_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 class SirenLayerNew(nn.Module): def __init__(self, in_f, out_f, w0=30, is_first=False, is_last=False): super().__init__() self.in_f = in_f self.w0 = w0 self.linear = nn.Linear(in_f, out_f) self.is_first = is_first self.is_last = is_last self.init_weights() def init_weights(self): b = 1 / self.in_f if self.is_first else np.sqrt(6 / self.in_f ) / self.w0 with torch.no_grad(): self.linear.weight.uniform_(-b, b) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
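A sketch comparing the compiled wrapper against the eager layer, assuming a CUDA device and both classes in scope:

import torch

torch.manual_seed(0)
eager = SirenLayer(in_f=4, out_f=4).cuda()
fused = SirenLayerNew(in_f=4, out_f=4).cuda()
fused.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')     # matches the asserted input strides
with torch.no_grad():
    print(torch.allclose(eager(x), fused(x), atol=1e-06))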
jonathanfrawley/pixel-nerf
SirenLayer
false
12,632
[ "BSD-2-Clause" ]
0
11d06decbda363d6c5188ec45091da8605da4dfd
https://github.com/jonathanfrawley/pixel-nerf/tree/11d06decbda363d6c5188ec45091da8605da4dfd
PredictionConvolutions
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/v7/cv7twjpvniqwhbel4vwonpnol3esiypsqnympotsly6desh4camn.py # Topologically Sorted Source Nodes: [locs], Original ATen: [aten.cat] # Source node to ATen node mapping: # locs => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3, %view_4, %view_5, %view_6], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 14, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, out_ptr0, xnumel, XBLOCK : tl.constexpr): 
xnumel = 2359296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4) % 147456 x0 = xindex % 4 x2 = (xindex // 589824) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 16384, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4096*((x0 + (4*x1)) % 16)) + (65536*(((x0 + (4*x1) + (65536*x2)) // 65536) % 4)) + (((x0 + (4*x1)) // 16) % 4096)), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + ((x0 + (4*x1)) % 16), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 40960, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr2 + ((4096*((x0 + (4*((-16384) + x1))) % 24)) + (98304*(((x0 + (4*((-16384) + x1)) + (98304*x2)) // 98304) % 4)) + (((x0 + (4*((-16384) + x1))) // 24) % 4096)), tmp13, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr3 + ((x0 + (4*((-16384) + x1))) % 24), tmp13, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 65536, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr4 + ((4096*((x0 + (4*((-40960) + x1))) % 24)) + (98304*(((x0 + (4*((-40960) + x1)) + (98304*x2)) // 98304) % 4)) + (((x0 + (4*((-40960) + x1))) // 24) % 4096)), tmp22, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr5 + ((x0 + (4*((-40960) + x1))) % 24), tmp22, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tmp0 >= tmp20 tmp29 = tl.full([1], 90112, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tmp28 & tmp30 tmp32 = tl.load(in_ptr6 + ((4096*((x0 + (4*((-65536) + x1))) % 24)) + (98304*(((x0 + (4*((-65536) + x1)) + (98304*x2)) // 98304) % 4)) + (((x0 + (4*((-65536) + x1))) // 24) % 4096)), tmp31, eviction_policy='evict_last', other=0.0) tmp33 = tl.load(in_ptr7 + ((x0 + (4*((-65536) + x1))) % 24), tmp31, eviction_policy='evict_last', other=0.0) tmp34 = tmp32 + tmp33 tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp31, tmp34, tmp35) tmp37 = tmp0 >= tmp29 tmp38 = tl.full([1], 114688, tl.int64) tmp39 = tmp0 < tmp38 tmp40 = tmp37 & tmp39 tmp41 = tl.load(in_ptr8 + ((4096*((x0 + (4*((-90112) + x1))) % 24)) + (98304*(((x0 + (4*((-90112) + x1)) + (98304*x2)) // 98304) % 4)) + (((x0 + (4*((-90112) + x1))) // 24) % 4096)), tmp40, eviction_policy='evict_last', other=0.0) tmp42 = tl.load(in_ptr9 + ((x0 + (4*((-90112) + x1))) % 24), tmp40, eviction_policy='evict_last', other=0.0) tmp43 = tmp41 + tmp42 tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype) tmp45 = tl.where(tmp40, tmp43, tmp44) tmp46 = tmp0 >= tmp38 tmp47 = tl.full([1], 131072, tl.int64) tmp48 = tmp0 < tmp47 tmp49 = tmp46 & tmp48 tmp50 = tl.load(in_ptr10 + ((4096*((x0 + (4*((-114688) + x1))) % 16)) + (65536*(((x0 + (4*((-114688) + x1)) + (65536*x2)) // 65536) % 4)) + (((x0 + (4*((-114688) + x1))) // 16) % 4096)), tmp49, eviction_policy='evict_last', other=0.0) tmp51 = tl.load(in_ptr11 + ((x0 + (4*((-114688) + x1))) % 16), tmp49, eviction_policy='evict_last', other=0.0) tmp52 = tmp50 + tmp51 tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp49, tmp52, tmp53) tmp55 = tmp0 >= tmp47 tmp56 = tl.full([1], 147456, tl.int64) tmp57 = tmp0 < 
tmp56 tmp58 = tl.load(in_ptr12 + ((4096*((x0 + (4*((-131072) + x1))) % 16)) + (65536*(((x0 + (4*((-131072) + x1)) + (65536*x2)) // 65536) % 4)) + (((x0 + (4*((-131072) + x1))) // 16) % 4096)), tmp55, eviction_policy='evict_last', other=0.0) tmp59 = tl.load(in_ptr13 + ((x0 + (4*((-131072) + x1))) % 16), tmp55, eviction_policy='evict_last', other=0.0) tmp60 = tmp58 + tmp59 tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype) tmp62 = tl.where(tmp55, tmp60, tmp61) tmp63 = tl.where(tmp49, tmp54, tmp62) tmp64 = tl.where(tmp40, tmp45, tmp63) tmp65 = tl.where(tmp31, tmp36, tmp64) tmp66 = tl.where(tmp22, tmp27, tmp65) tmp67 = tl.where(tmp13, tmp18, tmp66) tmp68 = tl.where(tmp4, tmp9, tmp67) tl.store(out_ptr0 + (x3), tmp68, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35 = args args.clear() assert_size_stride(primals_1, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_2, (16, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (24, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_5, (24, ), (1, )) assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_7, (24, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_8, (24, ), (1, )) assert_size_stride(primals_9, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_10, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (24, ), (1, )) assert_size_stride(primals_12, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_13, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_14, (24, ), (1, )) assert_size_stride(primals_15, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_16, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (16, ), (1, )) assert_size_stride(primals_18, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_19, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_20, (16, ), (1, )) assert_size_stride(primals_21, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_22, (16, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (16, ), (1, )) assert_size_stride(primals_24, (24, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_25, (24, ), (1, )) assert_size_stride(primals_26, (24, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (24, ), (1, )) assert_size_stride(primals_28, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_29, (24, ), (1, )) assert_size_stride(primals_30, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_31, (24, ), (1, )) assert_size_stride(primals_32, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_33, (16, ), (1, )) assert_size_stride(primals_34, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [l_conv4_3], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) # Topologically Sorted Source Nodes: [l_conv7], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 24, 64, 64), (98304, 4096, 64, 1)) # Topologically Sorted Source Nodes: [l_conv8_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 24, 64, 64), (98304, 4096, 64, 1)) # Topologically Sorted Source Nodes: [l_conv9_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(primals_12, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 24, 64, 64), (98304, 4096, 64, 1)) # Topologically Sorted Source Nodes: [l_conv10_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(primals_15, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 24, 64, 64), (98304, 4096, 64, 1)) # Topologically Sorted Source Nodes: [l_conv11_2], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(primals_18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 16, 64, 64), (65536, 4096, 64, 1)) # Topologically Sorted Source Nodes: [l_conv12_2], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(primals_21, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1)) # Topologically Sorted Source Nodes: [c_conv4_3], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(primals_1, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 16, 64, 64), (65536, 4096, 64, 1)) # Topologically Sorted Source Nodes: [c_conv7], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(primals_6, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 24, 64, 64), (98304, 4096, 64, 1)) # Topologically Sorted Source Nodes: [c_conv8_2], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(primals_9, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 24, 64, 64), (98304, 4096, 64, 1)) # Topologically Sorted Source Nodes: [c_conv9_2], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(primals_12, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 24, 64, 64), (98304, 4096, 64, 1)) # Topologically Sorted Source Nodes: [c_conv10_2], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(primals_15, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf11, (4, 24, 64, 64), (98304, 4096, 64, 1)) # Topologically Sorted Source Nodes: [c_conv11_2], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(primals_18, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 16, 64, 64), (65536, 4096, 64, 1)) # Topologically Sorted Source Nodes: [c_conv12_2], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(primals_21, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf14 = empty_strided_cuda((4, 147456, 4), (589824, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [locs], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_3, buf1, primals_5, buf2, primals_8, buf3, primals_11, buf4, primals_14, buf5, primals_17, buf6, primals_20, buf14, 2359296, grid=grid(2359296), stream=stream0) del buf0 del buf1 del buf2 del buf3 del buf4 del buf5 del buf6 del primals_11 del primals_14 del primals_17 del primals_20 del primals_3 del primals_5 del primals_8 buf15 = empty_strided_cuda((4, 147456, 4), (589824, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [classes_scores], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf7, primals_23, buf8, primals_25, buf9, primals_27, buf10, primals_29, buf11, primals_31, buf12, primals_33, buf13, primals_35, buf15, 2359296, grid=grid(2359296), stream=stream0) del buf10 del buf11 del buf12 del buf13 del buf7 del buf8 del buf9 del primals_23 del primals_25 del primals_27 del primals_29 del primals_31 del primals_33 del primals_35 return (buf14, buf15, primals_1, primals_2, primals_4, primals_6, primals_7, primals_9, primals_10, primals_12, primals_13, primals_15, primals_16, primals_18, primals_19, primals_21, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((24, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 1024, 64, 64), (4194304, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((24, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((24, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((24, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 256, 64, 64), 
(1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((16, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((24, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((24, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((24, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((24, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
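The fused cat kernel above dispatches on the flattened row index x1 against the constants 16384, 40960, ..., 147456; these are just cumulative row counts of 64·64 locations times each map's box count. (The 24564 figure in the docstring belongs to the original SSD feature-map sizes, not to these uniform 64×64 test inputs.) A quick sketch reproducing the constants:

import itertools

boxes = [4, 6, 6, 6, 6, 4, 4]                 # conv4_3 .. conv12_2
rows = [64 * 64 * b for b in boxes]           # rows contributed per feature map
print(list(itertools.accumulate(rows)))
# [16384, 40960, 65536, 90112, 114688, 131072, 147456]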
import torch from torch import nn import torch.optim import torch.utils.data class PredictionConvolutions(nn.Module): """ Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps. The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 24564 prior (default) boxes. See 'cxcy_to_gcxgcy' in utils.py for the encoding definition. The class scores represent the scores of each object class in each of the 24564 bounding boxes located. A high score for 'background' = no object. """ def __init__(self, n_classes): """ :param n_classes: number of different types of objects """ super(PredictionConvolutions, self).__init__() self.n_classes = n_classes n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6, 'conv10_2': 6, 'conv11_2': 4, 'conv12_2': 4} self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1) self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size= 3, padding=1) self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1) self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1) self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1) self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1) self.loc_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * 4, kernel_size=3, padding=1) self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1) self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1) self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * n_classes, kernel_size=3, padding=1) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. 
""" for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats): batch_size = conv4_3_feats.size(0) l_conv4_3 = self.loc_conv4_3(conv4_3_feats) l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous() l_conv4_3 = l_conv4_3.view(batch_size, -1, 4) l_conv7 = self.loc_conv7(conv7_feats) l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous() l_conv7 = l_conv7.view(batch_size, -1, 4) l_conv8_2 = self.loc_conv8_2(conv8_2_feats) l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous() l_conv8_2 = l_conv8_2.view(batch_size, -1, 4) l_conv9_2 = self.loc_conv9_2(conv9_2_feats) l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous() l_conv9_2 = l_conv9_2.view(batch_size, -1, 4) l_conv10_2 = self.loc_conv10_2(conv10_2_feats) l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous() l_conv10_2 = l_conv10_2.view(batch_size, -1, 4) l_conv11_2 = self.loc_conv11_2(conv11_2_feats) l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous() l_conv11_2 = l_conv11_2.view(batch_size, -1, 4) l_conv12_2 = self.loc_conv12_2(conv12_2_feats) l_conv12_2 = l_conv12_2.permute(0, 2, 3, 1).contiguous() l_conv12_2 = l_conv12_2.view(batch_size, -1, 4) c_conv4_3 = self.cl_conv4_3(conv4_3_feats) c_conv4_3 = c_conv4_3.permute(0, 2, 3, 1).contiguous() c_conv4_3 = c_conv4_3.view(batch_size, -1, self.n_classes) c_conv7 = self.cl_conv7(conv7_feats) c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous() c_conv7 = c_conv7.view(batch_size, -1, self.n_classes) c_conv8_2 = self.cl_conv8_2(conv8_2_feats) c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous() c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes) c_conv9_2 = self.cl_conv9_2(conv9_2_feats) c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous() c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes) c_conv10_2 = self.cl_conv10_2(conv10_2_feats) c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous() c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes) c_conv11_2 = self.cl_conv11_2(conv11_2_feats) c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous() c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes) c_conv12_2 = self.cl_conv12_2(conv12_2_feats) c_conv12_2 = c_conv12_2.permute(0, 2, 3, 1).contiguous() c_conv12_2 = c_conv12_2.view(batch_size, -1, self.n_classes) locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2, l_conv10_2, l_conv11_2, l_conv12_2], dim=1) classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2, c_conv9_2, c_conv10_2, c_conv11_2, c_conv12_2], dim=1) return locs, classes_scores def get_inputs(): return [torch.rand([4, 512, 64, 64]), torch.rand([4, 1024, 64, 64]), torch.rand([4, 512, 64, 64]), torch.rand([4, 256, 64, 64]), torch. rand([4, 256, 64, 64]), torch.rand([4, 256, 64, 64]), torch.rand([4, 256, 64, 64])] def get_init_inputs(): return [[], {'n_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4 % 147456 x0 = xindex % 4 x2 = xindex // 589824 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 16384, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4096 * ((x0 + 4 * x1) % 16) + 65536 * ((x0 + 4 * x1 + 65536 * x2) // 65536 % 4) + (x0 + 4 * x1) // 16 % 4096), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0 + 4 * x1) % 16, tmp4, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 40960, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr2 + (4096 * ((x0 + 4 * (-16384 + x1)) % 24) + 98304 * ((x0 + 4 * (-16384 + x1) + 98304 * x2) // 98304 % 4) + (x0 + 4 * (-16384 + x1)) // 24 % 4096), tmp13, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.load(in_ptr3 + (x0 + 4 * (-16384 + x1)) % 24, tmp13, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 65536, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr4 + (4096 * ((x0 + 4 * (-40960 + x1)) % 24) + 98304 * ((x0 + 4 * (-40960 + x1) + 98304 * x2) // 98304 % 4) + (x0 + 4 * (-40960 + x1)) // 24 % 4096), tmp22, eviction_policy= 'evict_last', other=0.0) tmp24 = tl.load(in_ptr5 + (x0 + 4 * (-40960 + x1)) % 24, tmp22, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tmp0 >= tmp20 tmp29 = tl.full([1], 90112, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tmp28 & tmp30 tmp32 = tl.load(in_ptr6 + (4096 * ((x0 + 4 * (-65536 + x1)) % 24) + 98304 * ((x0 + 4 * (-65536 + x1) + 98304 * x2) // 98304 % 4) + (x0 + 4 * (-65536 + x1)) // 24 % 4096), tmp31, eviction_policy= 'evict_last', other=0.0) tmp33 = tl.load(in_ptr7 + (x0 + 4 * (-65536 + x1)) % 24, tmp31, eviction_policy='evict_last', other=0.0) tmp34 = tmp32 + tmp33 tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp31, tmp34, tmp35) tmp37 = tmp0 >= tmp29 tmp38 = tl.full([1], 114688, tl.int64) tmp39 = tmp0 < tmp38 tmp40 = tmp37 & tmp39 tmp41 = tl.load(in_ptr8 + (4096 * ((x0 + 4 * (-90112 + x1)) % 24) + 98304 * ((x0 + 4 * (-90112 + x1) + 98304 * x2) // 98304 % 4) + (x0 + 4 * (-90112 + x1)) // 24 % 4096), tmp40, eviction_policy= 'evict_last', other=0.0) tmp42 = tl.load(in_ptr9 + (x0 + 4 * (-90112 + x1)) % 24, tmp40, eviction_policy='evict_last', other=0.0) tmp43 = tmp41 + tmp42 tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype) tmp45 = tl.where(tmp40, tmp43, tmp44) tmp46 = tmp0 >= tmp38 tmp47 = tl.full([1], 131072, tl.int64) tmp48 = tmp0 < tmp47 tmp49 = tmp46 & tmp48 tmp50 = 
tl.load(in_ptr10 + (4096 * ((x0 + 4 * (-114688 + x1)) % 16) + 65536 * ((x0 + 4 * (-114688 + x1) + 65536 * x2) // 65536 % 4) + (x0 + 4 * (-114688 + x1)) // 16 % 4096), tmp49, eviction_policy= 'evict_last', other=0.0) tmp51 = tl.load(in_ptr11 + (x0 + 4 * (-114688 + x1)) % 16, tmp49, eviction_policy='evict_last', other=0.0) tmp52 = tmp50 + tmp51 tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp49, tmp52, tmp53) tmp55 = tmp0 >= tmp47 tl.full([1], 147456, tl.int64) tmp58 = tl.load(in_ptr12 + (4096 * ((x0 + 4 * (-131072 + x1)) % 16) + 65536 * ((x0 + 4 * (-131072 + x1) + 65536 * x2) // 65536 % 4) + (x0 + 4 * (-131072 + x1)) // 16 % 4096), tmp55, eviction_policy= 'evict_last', other=0.0) tmp59 = tl.load(in_ptr13 + (x0 + 4 * (-131072 + x1)) % 16, tmp55, eviction_policy='evict_last', other=0.0) tmp60 = tmp58 + tmp59 tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype) tmp62 = tl.where(tmp55, tmp60, tmp61) tmp63 = tl.where(tmp49, tmp54, tmp62) tmp64 = tl.where(tmp40, tmp45, tmp63) tmp65 = tl.where(tmp31, tmp36, tmp64) tmp66 = tl.where(tmp22, tmp27, tmp65) tmp67 = tl.where(tmp13, tmp18, tmp66) tmp68 = tl.where(tmp4, tmp9, tmp67) tl.store(out_ptr0 + x3, tmp68, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35) = args args.clear() assert_size_stride(primals_1, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_2, (16, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (24, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_5, (24,), (1,)) assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_7, (24, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_8, (24,), (1,)) assert_size_stride(primals_9, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_10, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (24,), (1,)) assert_size_stride(primals_12, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_13, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_14, (24,), (1,)) assert_size_stride(primals_15, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_16, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (16,), (1,)) assert_size_stride(primals_18, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_19, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_20, (16,), (1,)) assert_size_stride(primals_21, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_22, (16, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (16,), (1,)) assert_size_stride(primals_24, (24, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_25, (24,), (1,)) assert_size_stride(primals_26, (24, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (24,), (1,)) assert_size_stride(primals_28, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_29, (24,), (1,)) assert_size_stride(primals_30, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_31, (24,), (1,)) assert_size_stride(primals_32, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_33, (16,), (1,)) 
assert_size_stride(primals_34, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 24, 64, 64), (98304, 4096, 64, 1)) buf2 = extern_kernels.convolution(primals_9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 24, 64, 64), (98304, 4096, 64, 1)) buf3 = extern_kernels.convolution(primals_12, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 24, 64, 64), (98304, 4096, 64, 1)) buf4 = extern_kernels.convolution(primals_15, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 24, 64, 64), (98304, 4096, 64, 1)) buf5 = extern_kernels.convolution(primals_18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf6 = extern_kernels.convolution(primals_21, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf7 = extern_kernels.convolution(primals_1, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf8 = extern_kernels.convolution(primals_6, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 24, 64, 64), (98304, 4096, 64, 1)) buf9 = extern_kernels.convolution(primals_9, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 24, 64, 64), (98304, 4096, 64, 1)) buf10 = extern_kernels.convolution(primals_12, primals_28, stride=( 1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 24, 64, 64), (98304, 4096, 64, 1)) buf11 = extern_kernels.convolution(primals_15, primals_30, stride=( 1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 24, 64, 64), (98304, 4096, 64, 1)) buf12 = extern_kernels.convolution(primals_18, primals_32, stride=( 1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf13 = extern_kernels.convolution(primals_21, primals_34, stride=( 1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf14 = empty_strided_cuda((4, 147456, 4), (589824, 4, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(2359296)](buf0, primals_3, buf1, primals_5, buf2, primals_8, buf3, primals_11, buf4, primals_14, buf5, primals_17, buf6, primals_20, buf14, 2359296, XBLOCK=512, num_warps=8, num_stages=1) del buf0 del buf1 del buf2 del buf3 del buf4 del buf5 del buf6 del primals_11 del primals_14 del primals_17 del primals_20 del primals_3 del primals_5 del primals_8 buf15 = empty_strided_cuda((4, 147456, 4), (589824, 4, 1), torch. float32) triton_poi_fused_cat_0[grid(2359296)](buf7, primals_23, buf8, primals_25, buf9, primals_27, buf10, primals_29, buf11, primals_31, buf12, primals_33, buf13, primals_35, buf15, 2359296, XBLOCK=512, num_warps=8, num_stages=1) del buf10 del buf11 del buf12 del buf13 del buf7 del buf8 del buf9 del primals_23 del primals_25 del primals_27 del primals_29 del primals_31 del primals_33 del primals_35 return (buf14, buf15, primals_1, primals_2, primals_4, primals_6, primals_7, primals_9, primals_10, primals_12, primals_13, primals_15, primals_16, primals_18, primals_19, primals_21, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34) class PredictionConvolutionsNew(nn.Module): """ Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps. The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 24564 prior (default) boxes. See 'cxcy_to_gcxgcy' in utils.py for the encoding definition. The class scores represent the scores of each object class in each of the 24564 bounding boxes located. A high score for 'background' = no object. """ def __init__(self, n_classes): """ :param n_classes: number of different types of objects """ super(PredictionConvolutionsNew, self).__init__() self.n_classes = n_classes n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6, 'conv10_2': 6, 'conv11_2': 4, 'conv12_2': 4} self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1) self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size= 3, padding=1) self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1) self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1) self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1) self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1) self.loc_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * 4, kernel_size=3, padding=1) self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1) self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1) self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * n_classes, kernel_size=3, padding=1) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. 
""" for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, input_0, input_1, input_2, input_3, input_4, input_5, input_6): primals_2 = self.loc_conv4_3.weight primals_3 = self.loc_conv4_3.bias primals_4 = self.loc_conv7.weight primals_5 = self.loc_conv7.bias primals_7 = self.loc_conv8_2.weight primals_8 = self.loc_conv8_2.bias primals_10 = self.loc_conv9_2.weight primals_11 = self.loc_conv9_2.bias primals_13 = self.loc_conv10_2.weight primals_14 = self.loc_conv10_2.bias primals_16 = self.loc_conv11_2.weight primals_17 = self.loc_conv11_2.bias primals_19 = self.loc_conv12_2.weight primals_20 = self.loc_conv12_2.bias primals_22 = self.cl_conv4_3.weight primals_23 = self.cl_conv4_3.bias primals_24 = self.cl_conv7.weight primals_25 = self.cl_conv7.bias primals_26 = self.cl_conv8_2.weight primals_27 = self.cl_conv8_2.bias primals_28 = self.cl_conv9_2.weight primals_29 = self.cl_conv9_2.bias primals_30 = self.cl_conv10_2.weight primals_31 = self.cl_conv10_2.bias primals_32 = self.cl_conv11_2.weight primals_33 = self.cl_conv11_2.bias primals_34 = self.cl_conv12_2.weight primals_35 = self.cl_conv12_2.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 primals_12 = input_3 primals_15 = input_4 primals_18 = input_5 primals_21 = input_6 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35]) return output[0], output[1]
repo_name: doduythao/ssd
module_name: PredictionConvolutions
synthetic: false
uuid: 12633
licenses: ["MIT"]
stars: 0
sha: 170064a3edef05d3274b08ea7f622eb3238b5c5c
repo_link: https://github.com/doduythao/ssd/tree/170064a3edef05d3274b08ea7f622eb3238b5c5c
entry_point: GCN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/7s/c7sdgdzvetijpkvfeu3p5aih3qchwft4zmzs7hfsf4vbwypgyrim.py # Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.relu] # Source node to ATen node mapping: # add => add # x => relu # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) triton_poi_fused_add_relu_0 = async_compile.triton('triton_poi_fused_add_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = 
tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uz/cuzbnz5k52xhjpdoaucm3u5qsyj2prasqn3xsty665xnmdng4lmj.py # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_7 => relu_3 # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_12), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 1024), (1024, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (1024, ), (1, )) assert_size_stride(primals_5, (1024, 1024), (1024, 1)) assert_size_stride(primals_6, (1024, ), (1, )) assert_size_stride(primals_7, (1024, 1024), (1024, 1)) assert_size_stride(primals_8, (1024, ), (1, )) assert_size_stride(primals_9, (1024, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (2048, 4), (4, 1)) 
assert_size_stride(primals_12, (2048, ), (1, )) assert_size_stride(primals_13, (2048, 2048), (2048, 1)) assert_size_stride(primals_14, (2048, ), (1, )) assert_size_stride(primals_15, (2048, 2048), (2048, 1)) assert_size_stride(primals_16, (2048, ), (1, )) assert_size_stride(primals_17, (4, 2048), (2048, 1)) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm] extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_add_relu_0.run(buf2, primals_4, 4096, grid=grid(4096), stream=stream0) del primals_4 buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [support_1], Original ATen: [aten.mm] extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [add_1, x_2], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_0.run(buf5, primals_6, 4096, grid=grid(4096), stream=stream0) del primals_6 buf6 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [support_2], Original ATen: [aten.mm] extern_kernels.mm(buf5, primals_7, out=buf6) buf7 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf6, out=buf7) del buf6 buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [add_2, x_4], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_0.run(buf8, primals_8, 4096, grid=grid(4096), stream=stream0) del primals_8 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [support_3], Original ATen: [aten.mm] extern_kernels.mm(buf8, primals_9, out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.addmm(primals_10, primals_3, buf9, alpha=1, beta=1, out=buf10) del primals_10 buf11 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf10, reinterpret_tensor(primals_11, (4, 2048), (1, 4), 0), out=buf11) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf12, primals_12, 8192, grid=grid(8192), stream=stream0) del primals_12 buf13 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf12, reinterpret_tensor(primals_13, (2048, 2048), (1, 2048), 0), out=buf13) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf14, primals_14, 8192, grid=grid(8192), stream=stream0) del primals_14 buf15 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf14, 
reinterpret_tensor(primals_15, (2048, 2048), (1, 2048), 0), out=buf15) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf16, primals_16, 8192, grid=grid(8192), stream=stream0) del primals_16 buf17 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_18, buf16, reinterpret_tensor(primals_17, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf17) del primals_18 return (buf17, buf2, buf5, buf8, buf10, buf12, buf14, buf16, primals_17, primals_15, primals_13, primals_11, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_9, (4, 1024), (1, 4), 0), reinterpret_tensor(primals_7, (1024, 1024), (1, 1024), 0), reinterpret_tensor(primals_5, (1024, 1024), (1, 1024), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((2048, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((2048, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
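For orientation: triton_poi_fused_add_relu_0 above fuses the bias add and ReLU that follow each GraphConvolution, writing the result back into its input buffer (in_out_ptr0, hence the buf2 = buf1 reuse pattern in call()). A minimal eager-mode equivalent for the (4, 1024) activations and (1024,) bias seen here:

import torch

def add_relu_reference(buf, bias):
    # buf: (4, 1024) result of adj @ (x @ W); bias: (1024,), broadcast per row
    # (the kernel indexes it with xindex % 1024). Mutates buf in place to
    # mirror the in-place store through in_out_ptr0.
    buf += bias
    return buf.clamp_(min=0.0)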
from torch.nn import Module import math import torch from torch.nn.parameter import Parameter from torch.nn.modules.module import Module import torch.nn as nn import torch.nn.functional as F class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCN(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCN, self).__init__() self.gc1 = GraphConvolution(nfeat, 1024) self.gc2 = GraphConvolution(1024, 1024) self.gc3 = GraphConvolution(1024, 1024) self.gc4 = GraphConvolution(1024, nhid) self.linear1 = nn.Linear(nhid, 2048) self.linear2 = nn.Linear(2048, 2048) self.linear3 = nn.Linear(2048, 2048) self.linear4 = nn.Linear(2048, nclass) self.dropout = dropout def forward(self, x, adj): x = F.relu(self.gc1(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = F.relu(self.gc2(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = F.relu(self.gc3(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = self.gc4(x, adj) x = F.relu(self.linear1(x)) x = F.relu(self.linear2(x)) x = F.relu(self.linear3(x)) return self.linear4(x) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
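A quick smoke test of the reference module, using the shapes advertised by get_inputs() and get_init_inputs() above. Note that torch.spmm receives a dense adjacency here; the compiled trace (a plain mm) suggests that is intended, but it is an assumption of this sketch:

import torch

model = GCN(nfeat=4, nhid=4, nclass=4, dropout=0.5)
model.eval()  # disable dropout for a deterministic forward pass
x, adj = torch.rand(4, 4), torch.rand(4, 4)
out = model(x, adj)
print(out.shape)  # torch.Size([4, 4]): one score per class and node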
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import math from torch.nn.parameter import Parameter from torch.nn.modules.module import Module import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 1024), (1024, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (1024,), (1,)) assert_size_stride(primals_5, (1024, 1024), (1024, 1)) assert_size_stride(primals_6, (1024,), (1,)) assert_size_stride(primals_7, (1024, 1024), (1024, 1)) assert_size_stride(primals_8, (1024,), (1,)) assert_size_stride(primals_9, (1024, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (2048, 4), (4, 1)) assert_size_stride(primals_12, (2048,), (1,)) assert_size_stride(primals_13, (2048, 2048), (2048, 1)) assert_size_stride(primals_14, (2048,), (1,)) assert_size_stride(primals_15, (2048, 2048), (2048, 1)) assert_size_stride(primals_16, (2048,), (1,)) assert_size_stride(primals_17, (4, 2048), (2048, 1)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_relu_0[grid(4096)](buf2, primals_4, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf4 del buf4 triton_poi_fused_add_relu_0[grid(4096)](buf5, primals_6, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf6 = buf3 del buf3 extern_kernels.mm(buf5, primals_7, out=buf6) buf7 = 
empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(primals_3, buf6, out=buf7) del buf6 buf8 = buf7 del buf7 triton_poi_fused_add_relu_0[grid(4096)](buf8, primals_8, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf8, primals_9, out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_10, primals_3, buf9, alpha=1, beta=1, out=buf10) del primals_10 buf11 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_11, (4, 2048), (1, 4), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_relu_1[grid(8192)](buf12, primals_12, 8192, XBLOCK =128, num_warps=4, num_stages=1) del primals_12 buf13 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_13, (2048, 2048 ), (1, 2048), 0), out=buf13) buf14 = buf13 del buf13 triton_poi_fused_relu_1[grid(8192)](buf14, primals_14, 8192, XBLOCK =128, num_warps=4, num_stages=1) del primals_14 buf15 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf14, reinterpret_tensor(primals_15, (2048, 2048 ), (1, 2048), 0), out=buf15) buf16 = buf15 del buf15 triton_poi_fused_relu_1[grid(8192)](buf16, primals_16, 8192, XBLOCK =128, num_warps=4, num_stages=1) del primals_16 buf17 = buf9 del buf9 extern_kernels.addmm(primals_18, buf16, reinterpret_tensor( primals_17, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf17) del primals_18 return (buf17, buf2, buf5, buf8, buf10, buf12, buf14, buf16, primals_17, primals_15, primals_13, primals_11, reinterpret_tensor(primals_3, ( 4, 4), (1, 4), 0), reinterpret_tensor(primals_9, (4, 1024), (1, 4), 0), reinterpret_tensor(primals_7, (1024, 1024), (1, 1024), 0), reinterpret_tensor(primals_5, (1024, 1024), (1, 1024), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCNNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCNNew, self).__init__() self.gc1 = GraphConvolution(nfeat, 1024) self.gc2 = GraphConvolution(1024, 1024) self.gc3 = GraphConvolution(1024, 1024) self.gc4 = GraphConvolution(1024, nhid) self.linear1 = nn.Linear(nhid, 2048) self.linear2 = nn.Linear(2048, 2048) self.linear3 = nn.Linear(2048, 2048) self.linear4 = nn.Linear(2048, nclass) self.dropout = dropout def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_4 = self.gc1.bias primals_5 = self.gc2.weight primals_6 = self.gc2.bias primals_7 = self.gc3.weight primals_8 = self.gc3.bias primals_9 = self.gc4.weight 
primals_10 = self.gc4.bias primals_11 = self.linear1.weight primals_12 = self.linear1.bias primals_13 = self.linear2.weight primals_14 = self.linear2.bias primals_15 = self.linear3.weight primals_16 = self.linear3.bias primals_17 = self.linear4.weight primals_18 = self.linear4.bias primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
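A hedged usage sketch for the compiled wrapper: the weights are gathered from the module on every call, only the two inputs vary, and call() pins CUDA device 0, so a GPU is required. With dropout traced away, GCNNew is expected to agree with GCN in eval mode; that equivalence is an assumption, not something this record asserts:

import torch

model = GCNNew(nfeat=4, nhid=4, nclass=4, dropout=0.5).cuda()
x = torch.rand(4, 4, device='cuda')
adj = torch.rand(4, 4, device='cuda')
out = model(x, adj)  # executes call([...]) above and returns buf17
print(out.shape)     # torch.Size([4, 4])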
repo_name: jarvis08/gpackage-gcn-torch
module_name: GCN
synthetic: false
uuid: 12634
licenses: ["MIT"]
stars: 0
sha: 5e483ea3012dfd0f23b194519c1295e3efcbdc35
repo_link: https://github.com/jarvis08/gpackage-gcn-torch/tree/5e483ea3012dfd0f23b194519c1295e3efcbdc35
entry_point: TransformerDecoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vh/cvhiot6d7wcfsuy6rh4gl4pcavsvp2otvw6mzmz3nqtaqwsotole.py # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat] # Source node to ATen node mapping: # repeat => repeat # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%view_12, [1, 4, 1, 1]), kwargs = {}) triton_poi_fused_repeat_1 = async_compile.triton('triton_poi_fused_repeat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i1', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x3 = xindex tmp0 = x0 + ((-1)*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 <= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 == tmp4 tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/37/c37im5gvzgd54i6pwdkzbzagjgm2hlvyuufc537slxun4obyhoes.py # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn_weights => exp, sum_1 # masked_fill_ => full_default_2, where_1 # Graph fragment: # %full_default_2 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_13, %full_default_2, %bmm), kwargs = {}) # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_1, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [-1], 
True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {}) # %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_masked_fill_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = 
tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + (x0), tmp20, xmask) tl.store(out_ptr1 + (x0), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/s7/cs7tkqbp32zttfoqu3noo4dzger63apkogo36sj6lkru33bw54tm.py # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn_weights => div_1, exp # masked_fill_ => full_default_2, where_1 # Graph fragment: # %full_default_2 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_13, %full_default_2, %bmm), kwargs = {}) # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_1, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [-1], True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {}) # %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_masked_fill_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1) tmp1 = 
tl.load(in_out_ptr0 + (x2), xmask) tmp6 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/s7/cs7p2dyxlesdvuyx4owztmqg5sapsarlgzaivin7okeoe6lxygw7.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_18, [1]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_native_layer_norm_5 = async_compile.triton('triton_poi_fused_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/y6/cy6mkjdwes62jaih4dzebyknvxezhquh37cme5cflrxbxff3z675.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add_1, add_2, mul, mul_1, rsqrt, sub_2 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_18, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_10), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_11), kwargs = {}) triton_poi_fused_native_layer_norm_6 = async_compile.triton('triton_poi_fused_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/36/c367rcv2obfrr3jnd6iifol46chcr6me3cdxpbe7jwum3jppqkna.py # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat] # Source node to ATen node mapping: # repeat_1 => repeat_1 # Graph fragment: # %repeat_1 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%view_32, [1, 4, 1, 1]), kwargs = {}) triton_poi_fused_repeat_7 = async_compile.triton('triton_poi_fused_repeat_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6h/c6hd5ncxoecwkbb7fby7ebnhqlwhh4mzmufikxyk5aouosneh42d.py # Topologically Sorted Source Nodes: [masked_fill_, masked_fill__1, attn_weights_1], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn_weights_1 => exp_1, sum_2 # masked_fill_ => full_default_2 # masked_fill__1 => where_2 # Graph fragment: # %full_default_2 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_33, %full_default_2, %bmm_2), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_2, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) triton_poi_fused__softmax_masked_fill_8 = async_compile.triton('triton_poi_fused__softmax_masked_fill_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + (x2), tmp20, xmask) tl.store(out_ptr1 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/s5/cs5gdmm3aovofwif2w4uihyrz2dnsg43d5yononneqsiaiy2q2eu.py # Topologically Sorted Source Nodes: [masked_fill_, masked_fill__1, attn_weights_1], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn_weights_1 => div_3, exp_1 # masked_fill_ => full_default_2 # masked_fill__1 => where_2 # Graph fragment: # %full_default_2 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_33, %full_default_2, %bmm_2), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_2, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = 
(%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) triton_poi_fused__softmax_masked_fill_9 = async_compile.triton('triton_poi_fused__softmax_masked_fill_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex x4 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + (x3), xmask) tmp6 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + (x3), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/cn/ccn3ztml5qupb2u6trhxfni5siq2chr5f7il63gpx775u3hi6iz5.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add] # Source node to ATen node mapping: # x_2 => add_3 # Graph fragment: # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %view_37), kwargs = {}) triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, 
math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/s5/cs5qfpz5dtrtoopym4yheekk2kuetbhcvypaqjhns7stzzfhkjha.py # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_38, [1]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_11 = async_compile.triton('triton_poi_fused_native_layer_norm_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': 
True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/dr/cdrqch7dkzd6wgbo46meh7vw2wmmm5avruav7pzwc4s4i4zleekn.py # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, add_5, mul_2, mul_3, rsqrt_1, sub_4, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_38, [1]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_38, %getitem_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_22), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_23), kwargs = {}) triton_poi_fused_native_layer_norm_12 = async_compile.triton('triton_poi_fused_native_layer_norm_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_12', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/d2/cd2jzht7h5xjxoygvrqqi5z6h7ymzldxrfdzzcr3hgci55wxxltm.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_4 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_41,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_13 = async_compile.triton('triton_poi_fused_relu_threshold_backward_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_13(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex 
< xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4, ), (1, )) assert_size_stride(primals_19, (4, 4), (4, 1)) assert_size_stride(primals_20, (4, 4), (4, 1)) assert_size_stride(primals_21, (4, ), (1, )) assert_size_stride(primals_22, (4, ), (1, )) assert_size_stride(primals_23, (4, ), (1, )) assert_size_stride(primals_24, (4, 4), (4, 1)) assert_size_stride(primals_25, (4, ), (1, )) assert_size_stride(primals_26, (4, 4), (4, 1)) assert_size_stride(primals_27, (4, ), (1, )) assert_size_stride(primals_28, (4, ), (1, )) assert_size_stride(primals_29, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 16, 4, grid=grid(16, 4), stream=stream0) del primals_3 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf3, primals_7, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0); 
del buf3 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_5, buf5, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_prod], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat] triton_poi_fused_repeat_1.run(buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0); del buf2 # reuse buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_2.run(buf7, buf6, buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_3.run(buf10, buf7, buf8, buf9, 256, grid=grid(256), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [attentioned], Original ATen: [aten.bmm] extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_9 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_5.run(buf13, primals_1, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_6.run(buf13, primals_1, buf14, buf15, primals_10, primals_11, buf16, 64, grid=grid(64), stream=stream0) del primals_11 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf16, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_4], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf17, primals_14, buf18, 16, 4, grid=grid(16, 4), stream=stream0) del primals_14 buf19 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) del primals_15 buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], 
Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf20) del primals_17 buf21 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_6], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf20, primals_18, buf21, 16, 4, grid=grid(16, 4), stream=stream0) del primals_18 buf22 = reinterpret_tensor(buf20, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf20 # reuse # Topologically Sorted Source Nodes: [contiguous_5], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf19, primals_16, buf22, 16, 4, grid=grid(16, 4), stream=stream0) del primals_16 buf23 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_prod_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf22, (16, 1, 4), (4, 0, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat] triton_poi_fused_repeat_7.run(primals_19, buf24, 64, grid=grid(64), stream=stream0) del primals_19 buf25 = reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 64), 0); del buf19 # reuse buf26 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [masked_fill_, masked_fill__1, attn_weights_1], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_8.run(buf24, buf23, buf25, buf26, 64, grid=grid(64), stream=stream0) buf27 = buf23; del buf23 # reuse # Topologically Sorted Source Nodes: [masked_fill_, masked_fill__1, attn_weights_1], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_9.run(buf27, buf24, buf25, buf26, 256, grid=grid(256), stream=stream0) buf28 = reinterpret_tensor(buf26, (16, 4, 1), (4, 1, 1), 0); del buf26 # reuse # Topologically Sorted Source Nodes: [attentioned_2], Original ATen: [aten.bmm] extern_kernels.bmm(buf27, reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 0), 0), out=buf28) buf29 = reinterpret_tensor(buf25, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf25 # reuse # Topologically Sorted Source Nodes: [contiguous_7], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf28, buf29, 16, 4, grid=grid(16, 4), stream=stream0) buf30 = reinterpret_tensor(buf28, (16, 4), (4, 1), 0); del buf28 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0); del buf30 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf31, buf16, primals_21, 64, grid=grid(64), stream=stream0) del primals_21 buf32 = buf15; del buf15 # reuse buf33 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_11.run(buf31, buf32, buf33, 16, grid=grid(16), stream=stream0) buf34 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_12.run(buf31, buf32, buf33, primals_22, primals_23, buf34, 64, grid=grid(64), stream=stream0) del primals_23 buf35 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[], Original ATen: [] extern_kernels.mm(buf34, reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf35) buf36 = reinterpret_tensor(buf35, (4, 4, 4), (16, 4, 1), 0); del buf35 # reuse buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_13.run(buf36, primals_25, buf42, 64, grid=grid(64), stream=stream0) del primals_25 buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(primals_26, (4, 4), (1, 4), 0), out=buf37) buf38 = reinterpret_tensor(buf37, (4, 4, 4), (16, 4, 1), 0); del buf37 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf38, buf34, primals_27, 64, grid=grid(64), stream=stream0) del primals_27 buf39 = buf33; del buf33 # reuse buf40 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_11.run(buf38, buf39, buf40, 16, grid=grid(16), stream=stream0) buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_12.run(buf38, buf39, buf40, primals_28, primals_29, buf41, 64, grid=grid(64), stream=stream0) del buf39 del buf40 del primals_29 return (reinterpret_tensor(buf41, (4, 4, 4), (16, 4, 1), 0), primals_1, primals_10, primals_22, primals_28, buf7, buf10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor(primals_12, (16, 4), (4, 1), 0), buf24, buf27, reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(buf31, (16, 4), (4, 1), 0), buf34, reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(buf38, (16, 4), (4, 1), 0), primals_26, buf42, primals_24, primals_20, reinterpret_tensor(buf21, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf18, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 1), 0), primals_13, primals_8, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) 
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
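# Note on the fused kernels above: triton_poi_fused__softmax_masked_fill_{2,3,8,9}
# jointly compute a numerically stable masked softmax over the attention scores.
# A minimal eager-mode sketch of the same math (a hypothetical helper; `scores` and
# `mask` stand in for dot_prod and the boolean attention mask, and the fill value
# matches the kernels' -1.0000000200408773e+20, i.e. float32(-1e20)):
def masked_softmax_reference(scores, mask):
    filled = scores.masked_fill(mask, -1.0000000200408773e+20)  # mask out disallowed positions
    shifted = filled - filled.amax(dim=-1, keepdim=True)  # subtract the row max for stability
    exp = shifted.exp()
    return exp / exp.sum(dim=-1, keepdim=True)  # normalize along the key dimension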
import math import torch from torch import nn import torch.nn.functional as F def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, dropout=0): super(TransformerFFN, self).__init__() self.dropout = nn.Dropout(p=dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.dropout(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerDecoderLayer(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.self_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.encoder_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, dropout= relu_dropout) self.norm3 = nn.LayerNorm(embedding_size) def forward(self, x, encoder_output, encoder_mask): decoder_mask = self._create_selfattn_mask(x) residual = x x = self.self_attention(query=x, mask=decoder_mask) x = x + residual x = _normalize(x, self.norm1) residual = x x = self.encoder_attention(query=x, key=encoder_output, value= encoder_output, mask=encoder_mask) x = residual + x x = _normalize(x, self.norm2) residual = x x = self.ffn(x) x = residual + x x = _normalize(x, self.norm3) return x def _create_selfattn_mask(self, x): bsz = 
x.size(0) time = x.size(1) mask = torch.tril(x.new(time, time).fill_(1)) mask = mask.unsqueeze(0).expand(bsz, -1, -1) return mask def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_heads': 4, 'embedding_size': 4, 'ffn_size': 4}]
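# A minimal smoke test for the eager reference above (a sketch; it relies only on
# the get_init_inputs/get_inputs helpers defined in this file and runs on CPU):
init_args, init_kwargs = get_init_inputs()
layer = TransformerDecoderLayer(*init_args, **init_kwargs)
x, encoder_output, encoder_mask = get_inputs()
out = layer(x, encoder_output, encoder_mask)
assert out.shape == x.shape  # the decoder layer preserves the (4, 4, 4) input shape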
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex tmp0 = x0 + -1 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 <= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 == tmp4 tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, 
XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x2, tmp11, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_repeat_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex x4 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_13(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4, 4), (4, 1)) assert_size_stride(primals_20, (4, 4), (4, 1)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (4,), (1,)) assert_size_stride(primals_23, (4,), (1,)) assert_size_stride(primals_24, (4, 4), (4, 1)) assert_size_stride(primals_25, (4,), (1,)) assert_size_stride(primals_26, (4, 4), (4, 1)) 
assert_size_stride(primals_27, (4,), (1,)) assert_size_stride(primals_28, (4,), (1,)) assert_size_stride(primals_29, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf3, primals_7, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_5, buf5, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_repeat_1[grid(256)](buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0) del buf2 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(64)](buf7, buf6, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf6 del buf6 triton_poi_fused__softmax_masked_fill_3[grid(256)](buf10, buf7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0) del buf9 extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_9 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) triton_poi_fused_native_layer_norm_5[grid(16)](buf13, primals_1, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_6[grid(64)](buf13, primals_1, buf14, buf15, primals_10, primals_11, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf16, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf17, primals_14, buf18, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_14 buf19 = buf17 del buf17 
extern_kernels.mm(reinterpret_tensor(primals_12, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) del primals_15 buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_12, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf20) del primals_17 buf21 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf20, primals_18, buf21, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_18 buf22 = reinterpret_tensor(buf20, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf20 triton_poi_fused_clone_0[grid(16, 4)](buf19, primals_16, buf22, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_16 buf23 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf22, (16, 1, 4), (4, 0, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) triton_poi_fused_repeat_7[grid(64)](primals_19, buf24, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_19 buf25 = reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 64), 0) del buf19 buf26 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_8[grid(64)](buf24, buf23, buf25, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = buf23 del buf23 triton_poi_fused__softmax_masked_fill_9[grid(256)](buf27, buf24, buf25, buf26, 256, XBLOCK=128, num_warps=4, num_stages=1) buf28 = reinterpret_tensor(buf26, (16, 4, 1), (4, 1, 1), 0) del buf26 extern_kernels.bmm(buf27, reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 0), 0), out=buf28) buf29 = reinterpret_tensor(buf25, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf25 triton_poi_fused_clone_4[grid(16, 4)](buf28, buf29, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf30 = reinterpret_tensor(buf28, (16, 4), (4, 1), 0) del buf28 extern_kernels.mm(reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0) del buf30 triton_poi_fused_add_10[grid(64)](buf31, buf16, primals_21, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_21 buf32 = buf15 del buf15 buf33 = buf14 del buf14 triton_poi_fused_native_layer_norm_11[grid(16)](buf31, buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_12[grid(64)](buf31, buf32, buf33, primals_22, primals_23, buf34, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_23 buf35 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf34, reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf35) buf36 = reinterpret_tensor(buf35, (4, 4, 4), (16, 4, 1), 0) del buf35 buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_13[grid(64)](buf36, primals_25, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_25 buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(primals_26, (4, 4), (1, 4), 0), out=buf37) buf38 = reinterpret_tensor(buf37, (4, 4, 4), (16, 4, 1), 0) del buf37 triton_poi_fused_add_10[grid(64)](buf38, buf34, primals_27, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_27 buf39 = buf33 del buf33 buf40 = buf32 del buf32 triton_poi_fused_native_layer_norm_11[grid(16)](buf38, buf39, 
buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_12[grid(64)](buf38, buf39, buf40, primals_28, primals_29, buf41, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf39 del buf40 del primals_29 return (reinterpret_tensor(buf41, (4, 4, 4), (16, 4, 1), 0), primals_1, primals_10, primals_22, primals_28, buf7, buf10, reinterpret_tensor (buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor( primals_12, (16, 4), (4, 1), 0), buf24, buf27, reinterpret_tensor( buf29, (16, 4), (4, 1), 0), reinterpret_tensor(buf31, (16, 4), (4, 1), 0), buf34, reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(buf38, (16, 4), (4, 1), 0), primals_26, buf42, primals_24, primals_20, reinterpret_tensor(buf21, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf18, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 1), 0), primals_13, primals_8, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)) def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, dropout=0): super(TransformerFFN, self).__init__() self.dropout = nn.Dropout(p=dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.dropout(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerDecoderLayerNew(nn.Module): def 
__init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.self_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.encoder_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, dropout= relu_dropout) self.norm3 = nn.LayerNorm(embedding_size) def _create_selfattn_mask(self, x): bsz = x.size(0) time = x.size(1) mask = torch.tril(x.new(time, time).fill_(1)) mask = mask.unsqueeze(0).expand(bsz, -1, -1) return mask def forward(self, input_0, input_1, input_2): primals_2 = self.self_attention.q_lin.weight primals_3 = self.self_attention.q_lin.bias primals_4 = self.self_attention.k_lin.weight primals_5 = self.self_attention.k_lin.bias primals_6 = self.self_attention.v_lin.weight primals_7 = self.self_attention.v_lin.bias primals_8 = self.self_attention.out_lin.weight primals_9 = self.self_attention.out_lin.bias primals_10 = self.norm1.weight primals_11 = self.norm1.bias primals_13 = self.encoder_attention.q_lin.weight primals_14 = self.encoder_attention.q_lin.bias primals_15 = self.encoder_attention.k_lin.weight primals_16 = self.encoder_attention.k_lin.bias primals_17 = self.encoder_attention.v_lin.weight primals_18 = self.encoder_attention.v_lin.bias primals_20 = self.encoder_attention.out_lin.weight primals_21 = self.encoder_attention.out_lin.bias primals_22 = self.norm2.weight primals_23 = self.norm2.bias primals_24 = self.ffn.lin1.weight primals_25 = self.ffn.lin1.bias primals_26 = self.ffn.lin2.weight primals_27 = self.ffn.lin2.bias primals_28 = self.norm3.weight primals_29 = self.norm3.bias primals_1 = input_0 primals_12 = input_1 primals_19 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29]) return output[0]
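# Hedged smoke test for the eager-mode MultiHeadAttention defined above.
# All shapes are illustrative assumptions (batch=2, seq=5, dim=8, heads=2),
# not values taken from the repo; the all-ones mask marks every position as
# valid, so nothing is masked out.
import torch

attn = MultiHeadAttention(n_heads=2, dim=8)
x = torch.rand(2, 5, 8)
mask = torch.ones(2, 5)
out = attn(x, mask=mask)
assert out.shape == (2, 5, 8)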
jinjiren/ParlAI
TransformerDecoderLayer
false
12,635
[ "MIT" ]
0
40799aeee69f2a0bb25a1341bb8da0c44861268e
https://github.com/jinjiren/ParlAI/tree/40799aeee69f2a0bb25a1341bb8da0c44861268e
AugCNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ur/curzudn4ai4j7lgrmbqwy57jpcw3gylwk4nkg6jt7lqh577w5ku7.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = 
tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 49152, grid=grid(49152), stream=stream0) del primals_3 return (buf1, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((3, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F def apply_init_(modules): """ Initialize NN modules """ for m in modules: if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) if m.bias is not None: nn.init.constant_(m.bias, 0) class Conv2d_tf(nn.Conv2d): """ Conv2d with the padding behavior from TF """ def __init__(self, *args, **kwargs): super(Conv2d_tf, self).__init__(*args, **kwargs) self.padding = kwargs.get('padding', 'SAME') def _compute_padding(self, input, dim): input_size = input.size(dim + 2) filter_size = self.weight.size(dim + 2) effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1 out_size = (input_size + self.stride[dim] - 1) // self.stride[dim] total_padding = max(0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size) additional_padding = int(total_padding % 2 != 0) return additional_padding, total_padding def forward(self, input): if self.padding == 'VALID': return F.conv2d(input, self.weight, self.bias, self.stride, padding=0, dilation=self.dilation, groups=self.groups) rows_odd, padding_rows = self._compute_padding(input, dim=0) cols_odd, padding_cols = self._compute_padding(input, dim=1) if rows_odd or cols_odd: input = F.pad(input, [0, cols_odd, 0, rows_odd]) return F.conv2d(input, self.weight, self.bias, self.stride, padding =(padding_rows // 2, padding_cols // 2), dilation=self.dilation, groups=self.groups) class AugCNN(nn.Module): """ Convolutional Neural Network used as Augmentation """ def __init__(self): super(AugCNN, self).__init__() self.aug = Conv2d_tf(3, 3, kernel_size=3) apply_init_(self.modules()) self.train() def forward(self, obs): return self.aug(obs) def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
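# Minimal sketch (with assumed shapes) of the TF-style 'SAME' padding
# implemented by Conv2d_tf above: with stride 2 on a 7x7 input, the module
# pads so the output spatial size is ceil(7 / 2) = 4, independent of the
# 3x3 kernel. 'padding' is deliberately left at its 'SAME' default.
import torch

conv = Conv2d_tf(3, 8, kernel_size=3, stride=2)
x = torch.rand(1, 3, 7, 7)
print(conv(x).shape)  # torch.Size([1, 8, 4, 4])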
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(49152)](buf1, primals_3, 49152, XBLOCK=512, num_warps=4, num_stages=1) del primals_3 return buf1, primals_1, primals_2 def apply_init_(modules): """ Initialize NN modules """ for m in modules: if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) if m.bias is not None: nn.init.constant_(m.bias, 0) class Conv2d_tf(nn.Conv2d): """ Conv2d with the padding behavior from TF """ def __init__(self, *args, **kwargs): super(Conv2d_tf, self).__init__(*args, **kwargs) self.padding = kwargs.get('padding', 'SAME') def _compute_padding(self, input, dim): input_size = input.size(dim + 2) filter_size = self.weight.size(dim + 2) effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1 out_size = (input_size + self.stride[dim] - 1) // self.stride[dim] total_padding = max(0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size) additional_padding = int(total_padding % 2 != 0) return additional_padding, total_padding def forward(self, input): if self.padding == 'VALID': return F.conv2d(input, self.weight, self.bias, self.stride, padding=0, dilation=self.dilation, groups=self.groups) rows_odd, padding_rows = self._compute_padding(input, dim=0) cols_odd, padding_cols = self._compute_padding(input, dim=1) if rows_odd or cols_odd: input = F.pad(input, [0, cols_odd, 0, rows_odd]) return F.conv2d(input, self.weight, self.bias, self.stride, padding =(padding_rows // 2, padding_cols // 2), dilation=self.dilation, groups=self.groups) class AugCNNNew(nn.Module): """ Convolutional Neural Network used as Augmentation """ def __init__(self): super(AugCNNNew, self).__init__() self.aug = Conv2d_tf(3, 3, kernel_size=3) apply_init_(self.modules()) self.train() def forward(self, input_0): primals_2 = self.aug.weight primals_3 = self.aug.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
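# Hedged parity check, assuming a CUDA device is available (the Triton
# kernel and `call` above are CUDA-only) and that both AugCNN and AugCNNNew
# from this entry are in scope: with shared conv weights, the compiled
# module should reproduce the eager one on the traced (4, 3, 64, 64) shape.
import torch

if torch.cuda.is_available():
    eager = AugCNN().cuda()
    fused = AugCNNNew().cuda()
    fused.aug.load_state_dict(eager.aug.state_dict())
    obs = torch.rand(4, 3, 64, 64, device='cuda')
    torch.testing.assert_close(eager(obs), fused(obs))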
joshnroy/contrastive-rl
AugCNN
false
12,636
[ "MIT" ]
0
d0e8cd8fd6963983dc62dd282b788002a892704e
https://github.com/joshnroy/contrastive-rl/tree/d0e8cd8fd6963983dc62dd282b788002a892704e
GlobalAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py # Topologically Sorted Source Nodes: [attn_weights_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_weights_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [attn_weights_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_weights_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 
4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (8, 4), (4, 1)) assert_size_stride(primals_6, (8, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [query], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [c], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (32, 1, 8), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_weights_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [attn_weights_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(buf4, reinterpret_tensor(buf1, (4, 4, 4), (32, 8, 1), 4), out=buf5) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 return (reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), primals_7, reinterpret_tensor(buf1, (4, 4, 4), (32, 1, 8), 4), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (32, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ 
== "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class GlobalAttention(nn.Module): """ Global Attention between encoder and decoder """ def __init__(self, key_features, query_features, value_features, hidden_features=None, dropout=0.0): """ Args: key_features: int dimension of keys query_features: int dimension of queries value_features: int dimension of values (outputs) hidden_features: int dimension of hidden states (default value_features) dropout: float dropout rate """ super(GlobalAttention, self).__init__() if hidden_features is None: hidden_features = value_features self.key_proj = nn.Linear(key_features, 2 * hidden_features, bias=True) self.query_proj = nn.Linear(query_features, hidden_features, bias=True) self.dropout = dropout self.fc = nn.Linear(hidden_features, value_features) self.hidden_features = hidden_features self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.key_proj.weight) nn.init.constant_(self.key_proj.bias, 0) nn.init.xavier_uniform_(self.query_proj.weight) nn.init.constant_(self.query_proj.bias, 0) nn.init.xavier_uniform_(self.fc.weight) nn.init.constant_(self.fc.bias, 0) def forward(self, query, key, key_mask=None): """ Args: query: Tensor query tensor [batch, query_length, query_features] key: Tensor key tensor [batch, key_length, key_features] key_mask: ByteTensor or None binary ByteTensor [batch, src_len] padding elements are indicated by 1s. Returns: Tensor value tensor [batch, query_length, value_features] """ bs, timesteps, _ = key.size() dim = self.hidden_features query = self.query_proj(query) c = self.key_proj(key) c = c.view(bs, timesteps, 2, dim) key = c[:, :, 0] value = c[:, :, 1] attn_weights = torch.bmm(query, key.transpose(1, 2)) if key_mask is not None: attn_weights = attn_weights.masked_fill(key_mask.unsqueeze(1), float('-inf')) attn_weights = F.softmax(attn_weights.float(), dim=-1, dtype=torch. float32 if attn_weights.dtype == torch.float16 else attn_weights.dtype) out = torch.bmm(attn_weights, value) out = F.dropout(self.fc(out), p=self.dropout, training=self.training) return out def init(self, query, key, key_mask=None, init_scale=1.0): with torch.no_grad(): return self(query, key, key_mask=key_mask) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'key_features': 4, 'query_features': 4, 'value_features': 4}]
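# Illustrative eager-mode run of GlobalAttention above; batch=2, query_len=3,
# key_len=5 are assumptions chosen to exercise masking, not repo values. The
# boolean key_mask flags the last two key positions of every batch element as
# padding, so they receive zero attention weight after the softmax.
import torch

attn = GlobalAttention(key_features=4, query_features=4, value_features=4)
query = torch.rand(2, 3, 4)
key = torch.rand(2, 5, 4)
key_mask = torch.zeros(2, 5, dtype=torch.bool)
key_mask[:, 3:] = True
out = attn(query, key, key_mask=key_mask)
print(out.shape)  # torch.Size([2, 3, 4])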
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (8, 4), (4, 1)) assert_size_stride(primals_6, (8,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 8), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (32, 1, 8), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64, 
num_warps=1, num_stages=1) buf5 = buf3 del buf3 extern_kernels.bmm(buf4, reinterpret_tensor(buf1, (4, 4, 4), (32, 8, 1), 4), out=buf5) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0 ), primals_7, reinterpret_tensor(buf1, (4, 4, 4), (32, 1, 8), 4 ), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 4, 4), (32, 8, 1), 0) class GlobalAttentionNew(nn.Module): """ Global Attention between encoder and decoder """ def __init__(self, key_features, query_features, value_features, hidden_features=None, dropout=0.0): """ Args: key_features: int dimension of keys query_features: int dimension of queries value_features: int dimension of values (outputs) hidden_features: int dimension of hidden states (default value_features) dropout: float dropout rate """ super(GlobalAttentionNew, self).__init__() if hidden_features is None: hidden_features = value_features self.key_proj = nn.Linear(key_features, 2 * hidden_features, bias=True) self.query_proj = nn.Linear(query_features, hidden_features, bias=True) self.dropout = dropout self.fc = nn.Linear(hidden_features, value_features) self.hidden_features = hidden_features self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.key_proj.weight) nn.init.constant_(self.key_proj.bias, 0) nn.init.xavier_uniform_(self.query_proj.weight) nn.init.constant_(self.query_proj.bias, 0) nn.init.xavier_uniform_(self.fc.weight) nn.init.constant_(self.fc.bias, 0) def init(self, query, key, key_mask=None, init_scale=1.0): with torch.no_grad(): return self(query, key, key_mask=key_mask) def forward(self, input_0, input_1): primals_5 = self.key_proj.weight primals_6 = self.key_proj.bias primals_2 = self.query_proj.weight primals_3 = self.query_proj.bias primals_7 = self.fc.weight primals_8 = self.fc.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
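# Hedged usage sketch for the compiled wrapper above (CUDA required). The
# Triton path is specialized to the traced (4, 4, 4) shapes and has no
# key_mask branch; per the generated forward, input_0 feeds key_proj and
# input_1 feeds query_proj.
import torch

if torch.cuda.is_available():
    m = GlobalAttentionNew(key_features=4, query_features=4,
                           value_features=4).cuda()
    key = torch.rand(4, 4, 4, device='cuda')
    query = torch.rand(4, 4, 4, device='cuda')
    print(m(key, query).shape)  # torch.Size([4, 4, 4])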
juheeuu/flowseq
GlobalAttention
false
12,637
[ "Apache-2.0" ]
0
e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
https://github.com/juheeuu/flowseq/tree/e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
MultiheadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/vb/cvbxdocetk5tj67qrxlvwxnmwf3z2efb37af24dzpjl5kw5bkmq5.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (1 + (3*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (1 + (3*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 
tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/q6/cq6z6xhbr2gyiw5id4lceemepctrgo25eo5gvvxlgbncy77qxm4p.py # Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax] # Source node to ATen node mapping: # mul => mul # softmax => amax, clone, exp, sub, sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, 1.0), kwargs = {}) # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_mul_1 = async_compile.triton('triton_poi_fused__softmax_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x3 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (3*x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (3*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x0 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (4 + x0 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (8 + x0 + (16*x3)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + (12 + x0 + (16*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp4 * tmp7 tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp4 * tmp10 tmp12 = 
triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp4 * tmp13 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp16 = tmp6 - tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp8 - tmp15 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp11 - tmp15 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp14 - tmp15 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tl.store(out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr1 + (x4), tmp15, xmask) tl.store(out_ptr2 + (x4), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/qh/cqhxytgzmsgejzq2rbt7fdr5j2twqslr3w2cz4yhih2lbgvx6yly.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, clone, div, exp, sub, sum_1 # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x0 = xindex % 4 x1 = (xindex // 4) % 4 x3 = (xindex // 64) x2 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, 
eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uq/cuqseg2vikrs3ttioriunrdhmp6cffp4xwpf2b6xdz7w3atftosj.py # Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # einsum_1 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (2 + (3*x1) + (12*x0) + (48*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (2 + (3*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_2 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_8,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/sz/csz5twnkhe2zz3eaafwmgidmezpgvs3favuuva73uud6rs7ouhj6.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add] # Source node to ATen node mapping: # x_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_5), kwargs = {}) triton_poi_fused_add_5 = async_compile.triton('triton_poi_fused_add_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf0, primals_2, buf2, 64, grid=grid(64), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) # Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax] triton_poi_fused__softmax_mul_1.run(buf0, primals_2, buf2, buf1, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf1, buf2, buf3, buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf0, primals_2, buf6, 64, grid=grid(64), stream=stream0) del buf0 del primals_2 buf7 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf7, buf8, 16, 4, grid=grid(16, 4), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add] triton_poi_fused_add_5.run(buf10, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 return (buf10, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0), reinterpret_tensor(buf2, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 
1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch as th class QKVMultiheadAttention(nn.Module): def __init__(self, n_heads: 'int', n_ctx: 'int'): super().__init__() self.n_heads = n_heads self.n_ctx = n_ctx def forward(self, qkv): bs, n_ctx, width = qkv.shape attn_ch = width // self.n_heads // 3 scale = 1 / math.sqrt(math.sqrt(attn_ch)) qkv = qkv.view(bs, n_ctx, self.n_heads, -1) q, k, v = th.split(qkv, attn_ch, dim=-1) weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale) wdtype = weight.dtype weight = th.softmax(weight.float(), dim=-1).type(wdtype) return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1) class MultiheadAttention(nn.Module): def __init__(self, n_ctx, width, heads): super().__init__() self.n_ctx = n_ctx self.width = width self.heads = heads self.c_qkv = nn.Linear(width, width * 3) self.c_proj = nn.Linear(width, width) self.attention = QKVMultiheadAttention(heads, n_ctx) def forward(self, x): x = self.c_qkv(x) x = self.attention(x) x = self.c_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_ctx': 4, 'width': 4, 'heads': 4}]
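# Quick eager-mode check of the einsum attention above, using the same
# hyperparameters as get_init_inputs. Note that the two 1/sqrt(sqrt(attn_ch))
# factors applied to q and k combine to the standard 1/sqrt(attn_ch) softmax
# scaling inside the q.k product.
import torch

mha = MultiheadAttention(n_ctx=4, width=4, heads=4)
x = torch.rand(4, 4, 4)
print(mha(x).shape)  # torch.Size([4, 4, 4])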
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn import torch as th assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (1 + 3 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x3 = xindex // 16 tmp0 = tl.load(in_ptr0 + 3 * x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x0 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr2 + (4 + x0 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr2 + (8 + x0 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr2 + (12 + x0 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp4 * tmp7 tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp4 * tmp10 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp4 * tmp13 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp16 = tmp6 - tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp8 - tmp15 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp11 - tmp15 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp14 - tmp15 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp15, xmask) tl.store(out_ptr2 + x4, tmp26, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex // 64 x2 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp7, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = 
tl.load(in_ptr0 + (2 + 3 * x1 + 12 * x0 + 48 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](buf0, primals_2, buf2, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) triton_poi_fused__softmax_mul_1[grid(64)](buf0, primals_2, buf2, buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf1, buf2, buf3, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0) del buf4 triton_poi_fused_clone_3[grid(64)](buf0, primals_2, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del primals_2 buf7 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0) del buf9 triton_poi_fused_add_5[grid(64)](buf10, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf10, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0 ), 
reinterpret_tensor(buf2, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0 ), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0) class QKVMultiheadAttention(nn.Module): def __init__(self, n_heads: 'int', n_ctx: 'int'): super().__init__() self.n_heads = n_heads self.n_ctx = n_ctx def forward(self, qkv): bs, n_ctx, width = qkv.shape attn_ch = width // self.n_heads // 3 scale = 1 / math.sqrt(math.sqrt(attn_ch)) qkv = qkv.view(bs, n_ctx, self.n_heads, -1) q, k, v = th.split(qkv, attn_ch, dim=-1) weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale) wdtype = weight.dtype weight = th.softmax(weight.float(), dim=-1).type(wdtype) return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1) class MultiheadAttentionNew(nn.Module): def __init__(self, n_ctx, width, heads): super().__init__() self.n_ctx = n_ctx self.width = width self.heads = heads self.c_qkv = nn.Linear(width, width * 3) self.c_proj = nn.Linear(width, width) self.attention = QKVMultiheadAttention(heads, n_ctx) def forward(self, input_0): primals_1 = self.c_qkv.weight primals_2 = self.c_qkv.bias primals_4 = self.c_proj.weight primals_5 = self.c_proj.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
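A hedged parity sketch for the compiled wrapper above (it assumes a CUDA device, since call() launches Triton kernels on cuda:0): with shared weights, MultiheadAttentionNew should reproduce the eager MultiheadAttention output.

import torch

if torch.cuda.is_available():
    ref = MultiheadAttention(n_ctx=4, width=4, heads=4).cuda()
    fast = MultiheadAttentionNew(n_ctx=4, width=4, heads=4).cuda()
    fast.load_state_dict(ref.state_dict())  # identical parameter layout
    x = torch.rand(4, 4, 4, device='cuda')
    with torch.no_grad():
        assert torch.allclose(ref(x), fast(x), atol=1e-5)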
johnpaulbin/glide-text2im
MultiheadAttention
false
12,638
[ "MIT" ]
0
4897050c4c540316dfb1ec7e6ff95698bcb20487
https://github.com/johnpaulbin/glide-text2im/tree/4897050c4c540316dfb1ec7e6ff95698bcb20487
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/gn/cgn37hvkbudswcotxg2kd4nkddnptf52plxombym7nh7tjs5rbbc.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 784) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jk/cjkfw7q2cdmjf2ab4jynamfj22kvje5c7y7o3cr7nyj7yej65x5e.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 12544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = (xindex // 14) x4 = xindex x3 = (xindex // 3136) x5 = xindex % 3136 tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4), tmp6, xmask) 
tl.store(out_ptr1 + (x5 + (3200*x3)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vi/cvii2lvgowi4ilvgcqvcouxsr3dqcg3yuq3ktwlv4tmvp2ll652d.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ui/cui3s4mqognsjp5q5f2iwenfoqbabemexibxzboo4z6bekyz74i4.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jn/cjnqv3sgcv5x2iz7ij5zdad6ofabcnonrlksgsxu2ob7n274gz6b.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6m/c6m6u2ctjb4r4ra3sizrwezzkzegfp2ombflmfg3dwjfci2pen7h.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_4 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (16, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (120, 800), (800, 1)) assert_size_stride(primals_7, (120, ), (1, )) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84, ), (1, )) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 28, 28), (12544, 784, 28, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 50176, grid=grid(50176), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 16, 14, 14), (3136, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 14, 14), (3200, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 12544, grid=grid(12544), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 10, 10), (3200, 100, 10, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 12800, grid=grid(12800), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 32, 5, 5), (800, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 32, 5, 5), (800, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 3200, grid=grid(3200), stream=stream0) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (4, 800), (800, 1), 0), reinterpret_tensor(primals_6, (800, 120), (1, 800), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 480, grid=grid(480), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) # Topologically Sorted Source Nodes: [], 
Original ATen: [] extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] triton_poi_fused_relu_5.run(buf11, primals_9, 336, grid=grid(336), stream=stream0) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 800), (800, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 16, 5, 5), (400, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((120, 800), (800, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((10, 84), (84, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
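A small sketch of the input contract enforced by the assert_size_stride calls in call() above; rand_strided in the benchmark harness builds tensors with exactly these (shape, stride) pairs, which for the model input are simply contiguous NCHW strides.

import torch

x = torch.rand(4, 3, 32, 32)               # contiguous NCHW input
assert x.stride() == (3072, 1024, 32, 1)   # matches assert_size_stride(primals_3, ...)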
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 16, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(16, 32, 5) self.fc1 = nn.Linear(32 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 32 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
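The layer sizes above imply the following shape walk-through (5x5 valid convolutions, 2x2 max pooling): 3x32x32 -> conv1 -> 16x28x28 -> pool -> 16x14x14 -> conv2 -> 32x10x10 -> pool -> 32x5x5 -> flatten -> 800, which is why fc1 takes 32 * 5 * 5 input features. A minimal run-through:

import torch

net = Net()
out = net(torch.rand(4, 3, 32, 32))
assert out.shape == (4, 10)  # batch of 4, 10 class logits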
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 12544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = xindex // 14 x4 = xindex x3 = xindex // 3136 x5 = xindex % 3136 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x4, tmp6, xmask) tl.store(out_ptr1 + (x5 + 3200 * x3), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, 
tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (16, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (120, 800), (800, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 28, 28), (12544, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(50176)](buf1, primals_2, 50176, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 16, 14, 14), (3136, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 14, 14), (3200, 196, 14, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(12544)](buf1, buf2, buf3, 12544, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 10, 10), (3200, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(12800)](buf5, primals_5, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 5, 5), (800, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 32, 5, 5), (800, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(3200)](buf5, buf6, buf7, 3200, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 800), (800, 1), 0), reinterpret_tensor(primals_6, (800, 120), (1, 800), 0), out=buf8) buf9 = buf8 del buf8 
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 800), (800, 1), 0), buf9, buf11, primals_10, primals_8, primals_6) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(3, 16, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(16, 32, 5) self.fc1 = nn.Linear(32 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
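A hedged parity sketch (it assumes CUDA, which the compiled graph requires). Note that the fused max-pool kernels above also emit int8 codes in {0, 1, 2, 3} recording which of the four 2x2 window positions won, mirroring the indices aten.max_pool2d_with_indices saves for the backward pass.

import torch

if torch.cuda.is_available():
    ref = Net().cuda()
    fast = NetNew().cuda()
    fast.load_state_dict(ref.state_dict())  # identical parameter layout
    x = torch.rand(4, 3, 32, 32, device='cuda')
    with torch.no_grad():
        assert torch.allclose(ref(x), fast(x), atol=1e-5)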
juliowaissman/cifar10-jwv
Net
false
12,639
[ "MIT" ]
0
a279ccf51f0e8cbacfcc34a9eee381c16ae536fc
https://github.com/juliowaissman/cifar10-jwv/tree/a279ccf51f0e8cbacfcc34a9eee381c16ae536fc
Disc
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7z/c7zsuucunqdovb2xa6tywxjxwmolzjzdk72ratro7fi3qvgyqb7c.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf7 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf5, primals_7, 64, grid=grid(64), stream=stream0) del primals_7 return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
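In eager terms, the fused epilogue in triton_poi_fused_sigmoid_1 above loads the single bias element of lin3 (out_features=1), broadcasts it across the block, adds it, and applies the sigmoid; a plain-PyTorch restatement:

import torch

logits = torch.randn(64, 1)         # the (64, 1) mm output, buf4 above
bias = torch.randn(1)               # primals_7, the scalar lin3 bias
out = torch.sigmoid(logits + bias)  # broadcast add, then sigmoid
assert out.shape == (64, 1)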
import torch
import torch.nn as nn
import torch.nn.functional as F


class Disc(nn.Module):

    def __init__(self, N, z_dim):
        super(Disc, self).__init__()
        self.lin1 = nn.Linear(z_dim, N)
        self.lin2 = nn.Linear(N, N)
        self.lin3 = nn.Linear(N, 1)

    def forward(self, x):
        x = F.dropout(self.lin1(x), p=0.2, training=self.training)
        x = F.relu(x)
        x = F.dropout(self.lin2(x), p=0.2, training=self.training)
        x = F.relu(x)
        # torch.sigmoid instead of the deprecated F.sigmoid; behavior is identical
        return torch.sigmoid(self.lin3(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'N': 4, 'z_dim': 4}]
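A short usage sketch: in eval mode F.dropout is the identity, which is consistent with the compiled graphs for this module containing no dropout kernels, and lin3 maps the last dimension to 1, hence the output shape.

import torch

d = Disc(N=4, z_dim=4).eval()
out = d(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 1)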
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf4 triton_poi_fused_sigmoid_1[grid(64)](buf5, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7 class DiscNew(nn.Module): def __init__(self, 
N, z_dim): super(DiscNew, self).__init__() self.lin1 = nn.Linear(z_dim, N) self.lin2 = nn.Linear(N, N) self.lin3 = nn.Linear(N, 1) def forward(self, input_0): primals_1 = self.lin1.weight primals_2 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_6 = self.lin3.weight primals_7 = self.lin3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
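Per element, triton_poi_fused_relu_threshold_backward_0 above computes the ReLU activation plus a boolean mask (output <= 0) that the backward pass uses to zero gradients through dead units; a plain-PyTorch restatement of the mask semantics:

import torch

x = torch.randn(8)
y = torch.relu(x)
mask = y <= 0                                        # the saved bool buffer (buf6/buf7)
grad_in = torch.ones_like(y).masked_fill(mask, 0.0)  # gradient of relu w.r.t. its input
assert torch.equal(grad_in, (x > 0).float())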
junhahyung/Pytorch-Sketch-RNN
Disc
false
12,640
[ "MIT" ]
0
7aa82755fdfdb9bd36f8a83f1cfc0ade43e50a7a
https://github.com/junhahyung/Pytorch-Sketch-RNN/tree/7aa82755fdfdb9bd36f8a83f1cfc0ade43e50a7a
GumbelSoftMaxSampler
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/uj/cujzvosjzzutzoqirql6cbiqmbcuhtdxjhbt3hdz5pzyjfgksdco.py # Topologically Sorted Source Nodes: [gumbel_softmax], Original ATen: [aten.log, aten.neg, aten.add, aten._softmax] # Source node to ATen node mapping: # gumbel_softmax => add, exp, log, neg, sum_1 # Graph fragment: # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%exponential,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %neg), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_log_neg_0 = async_compile.triton('triton_poi_fused__softmax_add_log_neg_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_log_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_log_neg_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tl_math.log(tmp1) tmp3 = -tmp2 tmp4 = tmp0 + tmp3 tmp5 = 1.0 tmp6 = tmp4 * tmp5 tmp9 = tl_math.log(tmp8) tmp10 = -tmp9 tmp11 = tmp7 + tmp10 tmp12 = tmp11 * tmp5 tmp13 = triton_helpers.maximum(tmp6, tmp12) tmp16 = tl_math.log(tmp15) tmp17 = -tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp18 * tmp5 tmp20 = triton_helpers.maximum(tmp13, tmp19) tmp23 = tl_math.log(tmp22) tmp24 = -tmp23 tmp25 = tmp21 + tmp24 tmp26 = tmp25 * tmp5 tmp27 = triton_helpers.maximum(tmp20, tmp26) tmp28 = tmp6 - tmp27 tmp29 = tmp28 * tmp5 tmp30 = tl_math.exp(tmp29) tmp31 = tmp12 - tmp27 tmp32 = tmp31 * tmp5 tmp33 = tl_math.exp(tmp32) tmp34 = tmp30 + tmp33 tmp35 = tmp19 - tmp27 tmp36 = tmp35 * tmp5 tmp37 = tl_math.exp(tmp36) tmp38 = tmp34 + tmp37 tmp39 = tmp26 - tmp27 tmp40 = tmp39 * tmp5 tmp41 = tl_math.exp(tmp40) tmp42 = tmp38 + tmp41 tl.store(out_ptr0 + (x0), tmp27, xmask) tl.store(out_ptr1 + (x0), tmp42, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/t5/ct5xtvx4mfmgxekpeeljuys2qhyqwsvbciwzgzjuo4eyqtwu27bn.py # Topologically Sorted Source Nodes: [gumbel_softmax], Original ATen: [aten.log, aten.neg, aten.add, aten._softmax] # Source node to ATen node mapping: # gumbel_softmax => add, div_1, exp, log, neg # Graph fragment: # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%exponential,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %neg), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1), kwargs = 
{}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_add_log_neg_1 = async_compile.triton('triton_poi_fused__softmax_add_log_neg_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_log_neg_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_log_neg_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp7 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.log(tmp1) tmp3 = -tmp2 tmp4 = tmp0 + tmp3 tmp5 = 1.0 tmp6 = tmp4 * tmp5 tmp8 = tmp6 - tmp7 tmp9 = tmp8 * tmp5 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tl.store(in_out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [gumbel_softmax], Original ATen: [aten.exponential] buf1 = torch.ops.aten.exponential.default(buf0) del buf0 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [gumbel_softmax], Original ATen: [aten.log, aten.neg, aten.add, aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_add_log_neg_0.run(arg0_1, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [gumbel_softmax], Original ATen: [aten.log, aten.neg, aten.add, aten._softmax] triton_poi_fused__softmax_add_log_neg_1.run(buf5, arg0_1, buf3, buf4, 
256, grid=grid(256), stream=stream0) del arg0_1 del buf3 del buf4 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch.nn import functional as F
from torch import nn
from typing import *


class GumbelSoftMaxSampler(nn.Module):

    def __init__(self, hard=False):
        super().__init__()
        self.hard = hard

    def forward(self, logits):
        return F.gumbel_softmax(logits=logits, hard=self.hard)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
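# --- Added usage sketch (not part of the original record). A minimal,
# hedged smoke test of the eager module above; the seed and shapes are
# illustrative assumptions.
import torch
from torch.nn import functional as F

torch.manual_seed(0)
logits = torch.rand(4, 4, 4, 4)
soft = F.gumbel_softmax(logits, hard=False)
# Soft samples are a relaxed categorical, so each last-dim slice sums to 1.
assert torch.allclose(soft.sum(dim=-1), torch.ones(4, 4, 4), atol=1e-5)

# hard=True returns one-hot samples in the forward pass while keeping
# soft gradients via the straight-through estimator.
hard = F.gumbel_softmax(logits, hard=True)
assert ((hard == 0) | (hard == 1)).all()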
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from typing import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_add_log_neg_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tl_math.log(tmp1) tmp3 = -tmp2 tmp4 = tmp0 + tmp3 tmp5 = 1.0 tmp6 = tmp4 * tmp5 tmp9 = tl_math.log(tmp8) tmp10 = -tmp9 tmp11 = tmp7 + tmp10 tmp12 = tmp11 * tmp5 tmp13 = triton_helpers.maximum(tmp6, tmp12) tmp16 = tl_math.log(tmp15) tmp17 = -tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp18 * tmp5 tmp20 = triton_helpers.maximum(tmp13, tmp19) tmp23 = tl_math.log(tmp22) tmp24 = -tmp23 tmp25 = tmp21 + tmp24 tmp26 = tmp25 * tmp5 tmp27 = triton_helpers.maximum(tmp20, tmp26) tmp28 = tmp6 - tmp27 tmp29 = tmp28 * tmp5 tmp30 = tl_math.exp(tmp29) tmp31 = tmp12 - tmp27 tmp32 = tmp31 * tmp5 tmp33 = tl_math.exp(tmp32) tmp34 = tmp30 + tmp33 tmp35 = tmp19 - tmp27 tmp36 = tmp35 * tmp5 tmp37 = tl_math.exp(tmp36) tmp38 = tmp34 + tmp37 tmp39 = tmp26 - tmp27 tmp40 = tmp39 * tmp5 tmp41 = tl_math.exp(tmp40) tmp42 = tmp38 + tmp41 tl.store(out_ptr0 + x0, tmp27, xmask) tl.store(out_ptr1 + x0, tmp42, xmask) @triton.jit def triton_poi_fused__softmax_add_log_neg_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tl_math.log(tmp1) tmp3 = -tmp2 tmp4 = tmp0 + tmp3 tmp5 = 1.0 tmp6 = tmp4 * tmp5 tmp8 = tmp6 - tmp7 tmp9 = tmp8 * tmp5 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tl.store(in_out_ptr0 + x2, tmp12, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = torch.ops.aten.exponential.default(buf0) del buf0 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_add_log_neg_0[grid(64)](arg0_1, buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf2 del buf2 
triton_poi_fused__softmax_add_log_neg_1[grid(256)](buf5, arg0_1, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del buf3 del buf4 return buf5, class GumbelSoftMaxSamplerNew(nn.Module): def __init__(self, hard=False): super().__init__() self.hard = hard def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
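# --- Added note (not part of the original record). The compiled graph above
# draws Gumbel noise as -log(E) with E ~ Exp(1) (aten.exponential followed by
# the log/neg ops), which is distributionally the same as the textbook
# -log(-log(U)) with U ~ Uniform(0, 1), since -log(U) ~ Exp(1). A small CPU
# sketch of that equivalence; sample size and seed are illustrative.
import torch

torch.manual_seed(0)
n = 1_000_000
u = torch.rand(n)
g1 = -torch.log(-torch.log(u))        # inverse-CDF Gumbel noise
e = torch.empty(n).exponential_()     # what the compiled graph samples
g2 = -torch.log(e)
# Both should match standard Gumbel moments: mean ~ 0.5772 (Euler's gamma),
# variance ~ pi**2 / 6 ~ 1.6449.
print(g1.mean().item(), g2.mean().item())
print(g1.var().item(), g2.var().item())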
jvrana/deep-learning-guides
GumbelSoftMaxSampler
false
12,641
[ "MIT" ]
0
18b7a0808073dd7b345e7a683dd7ee89e97e47ce
https://github.com/jvrana/deep-learning-guides/tree/18b7a0808073dd7b345e7a683dd7ee89e97e47ce
Gaussian
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/x6/cx6jmnk3w4fyizcgdntrll7zx32lso2oe3pzrnaggvqp5atfgroz.py # Topologically Sorted Source Nodes: [neg, mul, exp], Original ATen: [aten.neg, aten.mul, aten.exp] # Source node to ATen node mapping: # exp => exp # mul => mul # neg => neg # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg0_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) triton_poi_fused_exp_mul_neg_0 = async_compile.triton('triton_poi_fused_exp_mul_neg_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = -tmp0 tmp2 = tmp1 * tmp0 tmp3 = tl_math.exp(tmp2) tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [neg, mul, exp], Original ATen: [aten.neg, aten.mul, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_exp_mul_neg_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import Tensor
import torch.utils.tensorboard
import torch.utils.data


class Gaussian(torch.nn.Module):
    """Gaussian activation"""

    def forward(self, x: 'Tensor') -> Tensor:
        return torch.exp(-x * x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
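# --- Added usage sketch (not part of the original record); assumes the
# Gaussian class defined above is in scope. Values are illustrative.
import torch

act = Gaussian()
x = torch.tensor([-2.0, 0.0, 2.0])
y = act(x)
# exp(-x*x) peaks at 1.0 at x = 0 and decays symmetrically toward 0.
assert torch.allclose(y, torch.exp(-x ** 2))
print(y)  # approximately tensor([0.0183, 1.0000, 0.0183])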
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.tensorboard
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -tmp0
    tmp2 = tmp1 * tmp0
    tmp3 = tl_math.exp(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_mul_neg_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class GaussianNew(torch.nn.Module):
    """Gaussian activation"""

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
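# --- Added standalone sketch (not part of the original record). The same
# fused exp(-x*x) elementwise kernel written without Inductor's runtime
# helpers; kernel and variable names here are hypothetical, and a CUDA
# device is assumed.
import torch
import triton
import triton.language as tl


@triton.jit
def gaussian_kernel(in_ptr, out_ptr, n_elements, BLOCK: tl.constexpr):
    # One program handles one BLOCK-sized contiguous slice of the tensor.
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n_elements
    x = tl.load(in_ptr + offs, mask=mask)
    tl.store(out_ptr + offs, tl.exp(-x * x), mask=mask)


if torch.cuda.is_available():
    x = torch.rand(256, device='cuda')
    y = torch.empty_like(x)
    n = x.numel()
    gaussian_kernel[(triton.cdiv(n, 128),)](x, y, n, BLOCK=128)
    assert torch.allclose(y, torch.exp(-x * x))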
isayev/torchani
Gaussian
false
12,642
[ "MIT" ]
0
f8edffe384e2cb2eebe3a7e04faa01b6f5e26b37
https://github.com/isayev/torchani/tree/f8edffe384e2cb2eebe3a7e04faa01b6f5e26b37
NodeAdaptiveEncoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/2u/c2uscogbzv5w5toj2f5doql67p5kdfjl24e5exykcpjjl2uv3zyj.py # Topologically Sorted Source Nodes: [h, h_1, lt, zeros_like, where, gt, where_1, mul, add_1], Original ATen: [aten.add, aten.sigmoid, aten.lt, aten.zeros_like, aten.where, aten.gt, aten.mul] # Source node to ATen node mapping: # add_1 => add_1 # gt => gt # h => add # h_1 => sigmoid # lt => lt # mul => mul # where => where # where_1 => where_1 # zeros_like => full_default # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm, %primals_3), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%primals_2, 0), kwargs = {}) # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %full_default, %primals_2), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_2, 0), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %full_default, %primals_2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %where_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %mul), kwargs = {}) triton_poi_fused_add_gt_lt_mul_sigmoid_where_zeros_like_0 = async_compile.triton('triton_poi_fused_add_gt_lt_mul_sigmoid_where_zeros_like_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gt_lt_mul_sigmoid_where_zeros_like_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_gt_lt_mul_sigmoid_where_zeros_like_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 < tmp1 tmp3 = tl.where(tmp2, tmp1, tmp0) tmp7 = tmp4 + tmp6 tmp8 = tl.sigmoid(tmp7) tmp9 = tmp0 > tmp1 tmp10 = tl.where(tmp9, tmp1, tmp0) tmp11 = tmp8 * tmp10 tmp12 = tmp3 + tmp11 tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1), (1, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h, h_1, lt, zeros_like, where, gt, where_1, mul, add_1], Original ATen: [aten.add, aten.sigmoid, aten.lt, aten.zeros_like, aten.where, aten.gt, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_gt_lt_mul_sigmoid_where_zeros_like_0.run(primals_2, buf0, primals_3, buf1, 16, grid=grid(16), stream=stream0) return (buf1, primals_2, primals_3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn
import torch as torch


class NodeAdaptiveEncoder(nn.Module):

    def __init__(self, num_features, dropout=0.5):
        super(NodeAdaptiveEncoder, self).__init__()
        self.fc = nn.Parameter(torch.zeros(size=(num_features, 1)))
        nn.init.xavier_normal_(self.fc.data, gain=1.414)
        self.bf = nn.Parameter(torch.zeros(size=(1,)))
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, x):
        h = torch.mm(x, self.fc) + self.bf
        h = torch.sigmoid(h)
        h = self.dropout(h)
        return torch.where(x < 0, torch.zeros_like(x), x) + h * torch.where(
            x > 0, torch.zeros_like(x), x)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
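# --- Added check (not part of the original record). The two torch.where
# terms in forward() are just the positive and negative parts of x, so the
# module computes relu(x) + gate * min(x, 0) with a learned per-row gate.
# A small sketch, assuming the NodeAdaptiveEncoder class above is in scope.
import torch

x = torch.randn(4, 4)
pos = torch.where(x < 0, torch.zeros_like(x), x)
neg = torch.where(x > 0, torch.zeros_like(x), x)
assert torch.equal(pos, torch.relu(x))
assert torch.equal(neg, torch.clamp(x, max=0.0))

enc = NodeAdaptiveEncoder(num_features=4).eval()  # eval() disables dropout
gate = torch.sigmoid(x @ enc.fc + enc.bf)
expected = torch.relu(x) + gate * torch.clamp(x, max=0.0)
assert torch.allclose(enc(x), expected)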
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
import torch as torch

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_gt_lt_mul_sigmoid_where_zeros_like_0(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp1 = 0.0
    tmp2 = tmp0 < tmp1
    tmp3 = tl.where(tmp2, tmp1, tmp0)
    tmp7 = tmp4 + tmp6
    tmp8 = tl.sigmoid(tmp7)
    tmp9 = tmp0 > tmp1
    tmp10 = tl.where(tmp9, tmp1, tmp0)
    tmp11 = tmp8 * tmp10
    tmp12 = tmp3 + tmp11
    tl.store(out_ptr0 + x2, tmp12, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1), (1, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_gt_lt_mul_sigmoid_where_zeros_like_0[grid(16)](
            primals_2, buf0, primals_3, buf1, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
    return buf1, primals_2, primals_3, buf0


class NodeAdaptiveEncoderNew(nn.Module):

    def __init__(self, num_features, dropout=0.5):
        super(NodeAdaptiveEncoderNew, self).__init__()
        self.fc = nn.Parameter(torch.zeros(size=(num_features, 1)))
        nn.init.xavier_normal_(self.fc.data, gain=1.414)
        self.bf = nn.Parameter(torch.zeros(size=(1,)))
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, input_0):
        primals_1 = self.fc
        primals_3 = self.bf
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
ckhui/cogdl
NodeAdaptiveEncoder
false
12,643
[ "MIT" ]
0
93bea17c2dc7084857cd0a4af8178c174965127c
https://github.com/ckhui/cogdl/tree/93bea17c2dc7084857cd0a4af8178c174965127c
InvertibleMultiHeadFlow
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/uq/cuqw6hybzoomh5v56h5ewjc2oy4pdtd2gnrbz7lo2fdxyu54f3af.py # Topologically Sorted Source Nodes: [sum_1, num, logdet_1], Original ATen: [aten.sum, aten.mul] # Source node to ATen node mapping: # logdet_1 => mul_1 # num => mul # sum_1 => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_4, [1]), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, %mul), kwargs = {}) triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, 
rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (0)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 1.0 tmp6 = tmp4 * tmp5 tmp9 = tmp8 * tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) # Topologically Sorted Source Nodes: [linalg_slogdet], Original ATen: [aten._linalg_slogdet] buf1 = torch.ops.aten._linalg_slogdet.default(primals_2) del primals_2 buf3 = buf1[1] buf4 = buf1[2] buf5 = buf1[3] del buf1 buf6 = empty_strided_cuda((4, ), (1, ), torch.float32) buf7 = buf6; del buf6 # reuse buf8 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sum_1, num, logdet_1], Original ATen: [aten.sum, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_mul_sum_0.run(buf7, primals_3, buf3, buf8, 4, 64, grid=grid(4), stream=stream0) del buf3 del primals_3 return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf4, buf5, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from typing import Dict from typing import Tuple import torch.nn as nn from torch.nn import Parameter import torch.nn.functional as F class Flow(nn.Module): """ Normalizing Flow base class """ _registry = dict() def __init__(self, inverse): super(Flow, self).__init__() self.inverse = inverse def forward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def backward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def init(self, *input, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: x: Tensor The random variable before flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: y: Tensor, logdet: Tensor y, the random variable after flow logdet, the log determinant of :math:`\\partial y / \\partial x` Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet` """ if self.inverse: if init: raise RuntimeError( 'inverse flow shold be initialized with backward pass') else: return self.backward(x, *h, **kwargs) elif init: return self.init(x, *h, init_scale=init_scale, **kwargs) else: return self.forward(x, *h, **kwargs) def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: y: Tensor The random variable after flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: x: Tensor, logdet: Tensor x, the random variable before flow logdet, the log determinant of :math:`\\partial x / \\partial y` Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet` """ if self.inverse: if init: return self.init(y, *h, init_scale=init_scale, **kwargs) else: return self.forward(y, *h, **kwargs) elif init: raise RuntimeError( 'forward flow should be initialzed with forward pass') else: return self.backward(y, *h, **kwargs) @classmethod def register(cls, name: 'str'): Flow._registry[name] = cls @classmethod def by_name(cls, name: 'str'): return Flow._registry[name] @classmethod def from_params(cls, params: 'Dict'): raise NotImplementedError class InvertibleMultiHeadFlow(Flow): @staticmethod def _get_heads(in_features): units = [32, 16, 8] for unit in units: if in_features % unit == 0: return in_features // unit assert in_features < 8, 'features={}'.format(in_features) return 1 def __init__(self, in_features, heads=None, type='A', inverse=False): super(InvertibleMultiHeadFlow, self).__init__(inverse) self.in_features = in_features if heads is None: heads = InvertibleMultiHeadFlow._get_heads(in_features) self.heads = heads self.type = type assert in_features % heads == 0, 'features ({}) should be divided by heads ({})'.format( in_features, heads) assert type in ['A', 'B'], 'type should belong to [A, B]' self.weight = Parameter(torch.Tensor(in_features // heads, in_features // 
heads)) self.register_buffer('weight_inv', self.weight.data.clone()) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.weight) self.sync() def sync(self): self.weight_inv.copy_(self.weight.data.inverse()) def forward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ size = input.size() dim = input.dim() if self.type == 'A': out = input.view(*size[:-1], self.heads, self.in_features // self.heads) else: out = input.view(*size[:-1], self.in_features // self.heads, self.heads).transpose(-2, -1) out = F.linear(out, self.weight) if self.type == 'B': out = out.transpose(-2, -1).contiguous() out = out.view(*size) _, logdet = torch.linalg.slogdet(self.weight) if dim > 2: num = mask.view(size[0], -1).sum(dim=1) * self.heads logdet = logdet * num return out, logdet def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ size = input.size() dim = input.dim() if self.type == 'A': out = input.view(*size[:-1], self.heads, self.in_features // self.heads) else: out = input.view(*size[:-1], self.in_features // self.heads, self.heads).transpose(-2, -1) out = F.linear(out, self.weight_inv) if self.type == 'B': out = out.transpose(-2, -1).contiguous() out = out.view(*size) _, logdet = torch.linalg.slogdet(self.weight_inv) if dim > 2: num = mask.view(size[0], -1).sum(dim=1) * self.heads logdet = logdet * num return out, logdet def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0 ) ->Tuple[torch.Tensor, torch.Tensor]: with torch.no_grad(): return self.forward(data, mask) def extra_repr(self): return 'inverse={}, in_features={}, heads={}, type={}'.format(self. inverse, self.in_features, self.heads, self.type) @classmethod def from_params(cls, params: 'Dict') ->'InvertibleMultiHeadFlow': return InvertibleMultiHeadFlow(**params) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
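# --- Added round-trip sketch (not part of the original record), assuming
# the InvertibleMultiHeadFlow class above is in scope. backward() applied to
# forward() output should recover the input, and since the weight is
# orthogonal at init, both log-determinants are ~0 and cancel.
import torch

torch.manual_seed(0)
flow = InvertibleMultiHeadFlow(in_features=4)
x = torch.rand(4, 4, 4, 4)
mask = torch.ones(4, 4, 4, 4)
y, logdet_f = flow.forward(x, mask)
x_rec, logdet_b = flow.backward(y, mask)
assert torch.allclose(x, x_rec, atol=1e-5)
assert torch.allclose(logdet_f + logdet_b, torch.zeros_like(logdet_f),
                      atol=1e-4)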
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from typing import Dict from typing import Tuple import torch.nn as nn from torch.nn import Parameter import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + 0) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 1.0 tmp6 = tmp4 * tmp5 tmp9 = tmp8 * tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = torch.ops.aten._linalg_slogdet.default(primals_2) del primals_2 buf3 = buf1[1] buf4 = buf1[2] buf5 = buf1[3] del buf1 buf6 = empty_strided_cuda((4,), (1,), torch.float32) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](buf7, primals_3, buf3, buf8, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf3 del primals_3 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf4, buf5, buf7 class Flow(nn.Module): """ Normalizing Flow base class """ _registry = dict() def __init__(self, inverse): super(Flow, self).__init__() self.inverse = inverse def forward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def backward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def init(self, *input, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: x: Tensor The random variable before flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 
1.0) Returns: y: Tensor, logdet: Tensor y, the random variable after flow logdet, the log determinant of :math:`\\partial y / \\partial x` Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet` """ if self.inverse: if init: raise RuntimeError( 'inverse flow shold be initialized with backward pass') else: return self.backward(x, *h, **kwargs) elif init: return self.init(x, *h, init_scale=init_scale, **kwargs) else: return self.forward(x, *h, **kwargs) def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: y: Tensor The random variable after flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: x: Tensor, logdet: Tensor x, the random variable before flow logdet, the log determinant of :math:`\\partial x / \\partial y` Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet` """ if self.inverse: if init: return self.init(y, *h, init_scale=init_scale, **kwargs) else: return self.forward(y, *h, **kwargs) elif init: raise RuntimeError( 'forward flow should be initialzed with forward pass') else: return self.backward(y, *h, **kwargs) @classmethod def register(cls, name: 'str'): Flow._registry[name] = cls @classmethod def by_name(cls, name: 'str'): return Flow._registry[name] @classmethod def from_params(cls, params: 'Dict'): raise NotImplementedError class InvertibleMultiHeadFlowNew(Flow): @staticmethod def _get_heads(in_features): units = [32, 16, 8] for unit in units: if in_features % unit == 0: return in_features // unit assert in_features < 8, 'features={}'.format(in_features) return 1 def __init__(self, in_features, heads=None, type='A', inverse=False): super(InvertibleMultiHeadFlowNew, self).__init__(inverse) self.in_features = in_features if heads is None: heads = InvertibleMultiHeadFlowNew._get_heads(in_features) self.heads = heads self.type = type assert in_features % heads == 0, 'features ({}) should be divided by heads ({})'.format( in_features, heads) assert type in ['A', 'B'], 'type should belong to [A, B]' self.weight = Parameter(torch.Tensor(in_features // heads, in_features // heads)) self.register_buffer('weight_inv', self.weight.data.clone()) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.weight) self.sync() def sync(self): self.weight_inv.copy_(self.weight.data.inverse()) def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ size = input.size() dim = input.dim() if self.type == 'A': out = input.view(*size[:-1], self.heads, self.in_features // self.heads) else: out = input.view(*size[:-1], self.in_features // self.heads, self.heads).transpose(-2, -1) out = F.linear(out, self.weight_inv) if self.type == 'B': out = out.transpose(-2, -1).contiguous() out = out.view(*size) _, logdet = torch.linalg.slogdet(self.weight_inv) if dim > 2: num = mask.view(size[0], -1).sum(dim=1) * self.heads logdet = logdet * num return out, logdet def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0 ) ->Tuple[torch.Tensor, torch.Tensor]: with torch.no_grad(): return self.forward(data, mask) def extra_repr(self): 
return 'inverse={}, in_features={}, heads={}, type={}'.format(self. inverse, self.in_features, self.heads, self.type) @classmethod def from_params(cls, params: 'Dict') ->'InvertibleMultiHeadFlow': return InvertibleMultiHeadFlowNew(**params) def forward(self, input_0, input_1): primals_2 = self.weight primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
juheeuu/flowseq
InvertibleMultiHeadFlow
false
12,644
[ "Apache-2.0" ]
0
e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
https://github.com/juheeuu/flowseq/tree/e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
is_she_mad
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/sr/csrxdjbtbkq5mhx4lx76hdeti625uy52jalpuc5xjwghomvl635m.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex 
x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/dh/cdhj4aozvvzkw7stzrqoauyoij3petwtvi4g4weydesiaurrughd.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_3 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xr/cxrxf4nkydknjv7xhdecpyrprhviagsqwicrk4lpp64qv2hkzaxp.py # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # out_5 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 200), (200, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 200), (3200, 800, 200, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 12800, grid=grid(12800), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 128), (1, 200), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] 
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 8192, grid=grid(8192), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_2.run(buf5, primals_7, 64, grid=grid(64), stream=stream0) del primals_7 return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), buf5, primals_6, buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((200, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class is_she_mad(nn.Module):

    def __init__(self, modality_size):
        super(is_she_mad, self).__init__()
        self.fc1 = nn.Linear(modality_size, 200)
        self.fc2 = nn.Linear(200, 128)
        self.fc3 = nn.Linear(128, 1)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = F.relu(out)
        out = self.fc3(out)
        out = torch.sigmoid(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'modality_size': 4}]
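# The compiled graph flattens the (4, 4, 4, 4) input to (64, 4) before each
# extern_kernels.mm and views the result back afterwards; that is exactly how
# nn.Linear treats all leading dimensions as batch. An illustrative check
# (the tensor names here are ours, not from the generated code):
import torch

lin = torch.nn.Linear(4, 200)
x = torch.rand(4, 4, 4, 4)
flat = x.reshape(64, 4) @ lin.weight.t() + lin.bias
assert torch.allclose(lin(x), flat.view(4, 4, 4, 200), atol=1e-6)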
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 200), (200, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 200), (3200, 800, 200, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(12800)](buf1, primals_2, buf7, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 128), (1, 200), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3, primals_5, buf6, 8192, XBLOCK=256, 
num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf4 triton_poi_fused_sigmoid_2[grid(64)](buf5, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 200), (200, 1), 0 ), reinterpret_tensor(buf3, (64, 128), (128, 1), 0 ), buf5, primals_6, buf6, primals_4, buf7 class is_she_madNew(nn.Module): def __init__(self, modality_size): super(is_she_madNew, self).__init__() self.fc1 = nn.Linear(modality_size, 200) self.fc2 = nn.Linear(200, 128) self.fc3 = nn.Linear(128, 1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
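# Hypothetical parity check (assumes a CUDA device; not part of the record):
# is_she_madNew keeps the same nn.Linear parameters and only routes forward()
# through the compiled call(), so copying the state_dict should make the two
# modules agree numerically.
import torch

eager = is_she_mad(modality_size=4).cuda()
compiled = is_she_madNew(modality_size=4).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)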
jryzkns/IsSheMadAtMe
is_she_mad
false
12645
[ "MIT" ]
0
7776fb9730dab56f42418460efa0c2dec3988e46
https://github.com/jryzkns/IsSheMadAtMe/tree/7776fb9730dab56f42418460efa0c2dec3988e46
ResidualAttentionBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ov/covnoswvamrduqnlpu7ioajrnn3hhte55stjgoorf2z6wjvkkv75.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_3, 1.0), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (1 + (3*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (1 + (3*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/a3/ca3gcd62vddrpdf7wel6bpef44cv6vzsauaaigftsutida5ifj66.py # Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax] # Source node to ATen node mapping: # mul => mul_2 # softmax => amax, clone, exp, sub_1, sum_1 # Graph fragment: # %mul_2 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_2, 1.0), kwargs = {}) # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_mul_3 = async_compile.triton('triton_poi_fused__softmax_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x3 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (3*x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (3*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x0 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (4 + x0 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (8 + x0 + (16*x3)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + (12 + x0 + (16*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp4 * tmp7 tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp4 * tmp10 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp4 * tmp13 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp16 = tmp6 - tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp8 - tmp15 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp11 - tmp15 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp14 - tmp15 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tl.store(out_ptr0 + (x4), tmp4, xmask) 
tl.store(out_ptr1 + (x4), tmp15, xmask) tl.store(out_ptr2 + (x4), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ry/crysvnpehef7t7qavdv4tjejxu2fplwpz5t7ziu4zobu2iuwbxgh.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, clone, div, exp, sub_1, sum_1 # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x0 = xindex % 4 x1 = (xindex // 4) % 4 x3 = (xindex // 64) x2 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vt/cvtpzj4f2hmyerpchbm5i4eart4tas2pfh5e56t2solbcays5o7r.py # Topologically Sorted 
Source Nodes: [einsum_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # einsum_1 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (2 + (3*x1) + (12*x0) + (48*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (2 + (3*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/4k/c4kxxzyxk45cygbwnqkt5vb2udxol67wuotkh6zmuwsinb63uprn.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_2 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_8,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/wy/cwy4kxyjt4etqj66cdnstjhqnyeqdu5ftn4ptejfyamzau7n3xqh.py # Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => var_mean_1 # x_2 => add_2 # x_3 => add_3 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_7), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_2), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/2f/c2fol3uyk6gyusnfl52autx5hdhf3lnnchrftxvbnmmxfs5li5o4.py # Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, add_5, mul_5, mul_6, rsqrt_1, sub_2 # x_2 => add_2 # x_3 => add_3 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_7), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_5, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_6), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_8), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_9), kwargs = {}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7s/c7sh7255dxwfgfvzmp2g3slml6c3g5jdyjpq3rxua55pu5gg2wfd.py # Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu] # Source node to ATen node mapping: # gelu => add_6, erf, mul_7, mul_8, mul_9 # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, 0.5), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_8,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %add_6), kwargs = {}) triton_poi_fused_gelu_9 = async_compile.triton('triton_poi_fused_gelu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7n/c7nsuy663pmek7olaj7xnomqpsspowavb2zqwzrjkaz42y7dnrtc.py # Topologically Sorted Source Nodes: [x_2, x_3, x_4], Original ATen: [aten.add] # Source node to ATen node mapping: # x_2 => add_2 # x_3 => add_3 # x_4 => add_7 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_7), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_2), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_14), kwargs = {}) triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, 
in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (16, ), (1, )) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 64, grid=grid(64), stream=stream0) del primals_2 del primals_3 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf3, primals_5, buf5, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) # Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax] triton_poi_fused__softmax_mul_3.run(buf3, primals_5, buf5, buf4, buf6, buf7, 64, grid=grid(64), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf4, buf5, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf3, primals_5, buf9, 64, grid=grid(64), stream=stream0) del buf3 del primals_5 
buf10 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1; del buf1 # reuse buf14 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf12, primals_7, buf13, buf14, 16, grid=grid(16), stream=stream0) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_8.run(primals_1, buf12, primals_7, buf13, buf14, primals_8, primals_9, buf15, 64, grid=grid(64), stream=stream0) del buf13 del buf14 del primals_9 buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_11 buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu] triton_poi_fused_gelu_9.run(buf16, buf17, 256, grid=grid(256), stream=stream0) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [x_2, x_3, x_4], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf19, primals_1, buf12, primals_7, primals_13, 64, grid=grid(64), stream=stream0) del primals_13 return (buf19, primals_1, primals_7, primals_8, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0), reinterpret_tensor(buf5, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_12, primals_10, primals_6, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = 
rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
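# triton_poi_fused__softmax_mul_3 and triton_poi_fused__softmax_4 above split
# the attention softmax into the standard numerically stable two-pass form:
# pass one folds in the q*k scale and produces the row maximum plus the sum
# of shifted exponentials; pass two normalizes. An eager sketch of that
# decomposition (illustrative only):
import torch

def stable_softmax(scores: torch.Tensor) -> torch.Tensor:
    row_max = scores.amax(dim=-1, keepdim=True)  # pass one: running max
    exp = torch.exp(scores - row_max)            # shifted exponentials
    denom = exp.sum(dim=-1, keepdim=True)        # pass one: row sums
    return exp / denom                           # pass two: normalize

s = torch.randn(4, 4, 4, 4)
assert torch.allclose(stable_softmax(s), torch.softmax(s, dim=-1), atol=1e-6)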
import math
import torch
import torch.nn as nn
import torch as th


class LayerNorm(nn.LayerNorm):
    """
    Implementation that supports fp16 inputs but fp32 gains/biases.
    """

    def forward(self, x: 'th.Tensor'):
        return super().forward(x.float())


class QKVMultiheadAttention(nn.Module):

    def __init__(self, n_heads: 'int', n_ctx: 'int'):
        super().__init__()
        self.n_heads = n_heads
        self.n_ctx = n_ctx

    def forward(self, qkv):
        bs, n_ctx, width = qkv.shape
        attn_ch = width // self.n_heads // 3
        scale = 1 / math.sqrt(math.sqrt(attn_ch))
        qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
        q, k, v = th.split(qkv, attn_ch, dim=-1)
        weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
        wdtype = weight.dtype
        weight = th.softmax(weight.float(), dim=-1).type(wdtype)
        return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)


class MultiheadAttention(nn.Module):

    def __init__(self, n_ctx, width, heads):
        super().__init__()
        self.n_ctx = n_ctx
        self.width = width
        self.heads = heads
        self.c_qkv = nn.Linear(width, width * 3)
        self.c_proj = nn.Linear(width, width)
        self.attention = QKVMultiheadAttention(heads, n_ctx)

    def forward(self, x):
        x = self.c_qkv(x)
        x = self.attention(x)
        x = self.c_proj(x)
        return x


class MLP(nn.Module):

    def __init__(self, width):
        super().__init__()
        self.width = width
        self.c_fc = nn.Linear(width, width * 4)
        self.c_proj = nn.Linear(width * 4, width)
        self.gelu = nn.GELU()

    def forward(self, x):
        return self.c_proj(self.gelu(self.c_fc(x)))


class ResidualAttentionBlock(nn.Module):

    def __init__(self, n_ctx: 'int', width: 'int', heads: 'int'):
        super().__init__()
        self.attn = MultiheadAttention(n_ctx, width, heads)
        self.ln_1 = LayerNorm(width)
        self.mlp = MLP(width)
        self.ln_2 = LayerNorm(width)

    def forward(self, x: 'th.Tensor'):
        x = x + self.attn(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_ctx': 4, 'width': 4, 'heads': 4}]
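# QKVMultiheadAttention scales q and k each by attn_ch ** -0.25 before the
# einsum; the product of the two scales inside the dot product yields the
# familiar attn_ch ** -0.5 factor of scaled dot-product attention while
# keeping intermediate magnitudes smaller (helpful for fp16). A small numeric
# check (shapes are illustrative):
import math
import torch

attn_ch = 64
q = torch.randn(2, 3, 4, attn_ch)
k = torch.randn(2, 3, 4, attn_ch)
scale = 1 / math.sqrt(math.sqrt(attn_ch))
split = torch.einsum('bthc,bshc->bhts', q * scale, k * scale)
joint = torch.einsum('bthc,bshc->bhts', q, k) / math.sqrt(attn_ch)
assert torch.allclose(split, joint, atol=1e-5)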
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch as th assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (1 + 3 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x3 = xindex // 16 tmp0 = tl.load(in_ptr0 + 3 * x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x0 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr2 + (4 + x0 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr2 + (8 + x0 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr2 + (12 + x0 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp4 * tmp7 tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp4 * tmp10 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp4 * tmp13 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp16 = tmp6 - tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp8 - tmp15 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp11 - tmp15 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp14 - tmp15 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp15, xmask) tl.store(out_ptr2 + x4, tmp26, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex // 64 x2 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp7, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (2 + 3 * x1 + 12 * x0 + 48 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 
+ tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_mul_2[grid(64)](buf3, primals_5, buf5, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) triton_poi_fused__softmax_mul_3[grid(64)](buf3, primals_5, buf5, buf4, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf4, buf5, buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0) del buf7 triton_poi_fused_clone_5[grid(64)](buf3, primals_5, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 del primals_5 buf10 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0) del buf6 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12, primals_7, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_1, buf12, primals_7, buf13, buf14, primals_8, primals_9, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_9 buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_11 buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_gelu_9[grid(256)](buf16, buf17, 256, XBLOCK=128, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_add_10[grid(64)](buf19, primals_1, buf12, primals_7, primals_13, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_13 return buf19, primals_1, primals_7, primals_8, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0), reinterpret_tensor(buf5, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0 ), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0 ), primals_12, primals_10, primals_6, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), primals_4 class LayerNorm(nn.LayerNorm): """ Implementation that supports fp16 inputs but fp32 gains/biases. 
""" def forward(self, x: 'th.Tensor'): return super().forward(x.float()) class QKVMultiheadAttention(nn.Module): def __init__(self, n_heads: 'int', n_ctx: 'int'): super().__init__() self.n_heads = n_heads self.n_ctx = n_ctx def forward(self, qkv): bs, n_ctx, width = qkv.shape attn_ch = width // self.n_heads // 3 scale = 1 / math.sqrt(math.sqrt(attn_ch)) qkv = qkv.view(bs, n_ctx, self.n_heads, -1) q, k, v = th.split(qkv, attn_ch, dim=-1) weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale) wdtype = weight.dtype weight = th.softmax(weight.float(), dim=-1).type(wdtype) return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1) class MultiheadAttention(nn.Module): def __init__(self, n_ctx, width, heads): super().__init__() self.n_ctx = n_ctx self.width = width self.heads = heads self.c_qkv = nn.Linear(width, width * 3) self.c_proj = nn.Linear(width, width) self.attention = QKVMultiheadAttention(heads, n_ctx) def forward(self, x): x = self.c_qkv(x) x = self.attention(x) x = self.c_proj(x) return x class MLP(nn.Module): def __init__(self, width): super().__init__() self.width = width self.c_fc = nn.Linear(width, width * 4) self.c_proj = nn.Linear(width * 4, width) self.gelu = nn.GELU() def forward(self, x): return self.c_proj(self.gelu(self.c_fc(x))) class ResidualAttentionBlockNew(nn.Module): def __init__(self, n_ctx: 'int', width: 'int', heads: 'int'): super().__init__() self.attn = MultiheadAttention(n_ctx, width, heads) self.ln_1 = LayerNorm(width) self.mlp = MLP(width) self.ln_2 = LayerNorm(width) def forward(self, input_0): primals_4 = self.attn.c_qkv.weight primals_5 = self.attn.c_qkv.bias primals_6 = self.attn.c_proj.weight primals_2 = self.attn.c_proj.bias primals_3 = self.ln_1.weight primals_7 = self.ln_1.bias primals_10 = self.mlp.c_fc.weight primals_11 = self.mlp.c_fc.bias primals_12 = self.mlp.c_proj.weight primals_8 = self.mlp.c_proj.bias primals_9 = self.ln_2.weight primals_13 = self.ln_2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
johnpaulbin/glide-text2im
ResidualAttentionBlock
false
12646
[ "MIT" ]
0
4897050c4c540316dfb1ec7e6ff95698bcb20487
https://github.com/johnpaulbin/glide-text2im/tree/4897050c4c540316dfb1ec7e6ff95698bcb20487
DistMultLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/sr/csruoi7sqrevxeki5xnmzsq2chzkh4wlbrtopsch5bdo3txfvzib.py # Topologically Sorted Source Nodes: [mul, mul_1, sum_1], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg2_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {}) triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp7 = tmp5 * tmp6 tmp9 = tmp7 * tmp8 tmp10 = tmp4 + tmp9 tmp13 = tmp11 * tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp17 * tmp18 tmp21 = tmp19 * tmp20 tmp22 = tmp16 + tmp21 tl.store(out_ptr0 + (x0), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, sum_1], Original ATen: [aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_mul_sum_0.run(arg0_1, arg1_1, arg2_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn import torch as torch class DistMultLayer(nn.Module): def __init__(self): super(DistMultLayer, self).__init__() def forward(self, sub_emb, obj_emb, rel_emb): return torch.sum(sub_emb * obj_emb * rel_emb, dim=-1) def predict(self, sub_emb, obj_emb, rel_emb): return torch.matmul(sub_emb * rel_emb, obj_emb.t()) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
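# An illustrative relation between the two methods, not part of the original
# class: for 2-D embeddings, predict() scores each (subject, relation) query
# against every candidate object, so each entry reduces to a forward() call.
if __name__ == '__main__':
    layer = DistMultLayer()
    sub, rel = torch.rand(3, 5), torch.rand(3, 5)  # 3 queries, embedding dim 5
    obj = torch.rand(7, 5)                         # 7 candidate objects
    scores = layer.predict(sub, obj, rel)          # -> (3, 7) score matrix
    assert torch.allclose(scores[1, 2], layer.forward(sub[1], obj[2], rel[1]))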
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn import torch as torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp7 = tmp5 * tmp6 tmp9 = tmp7 * tmp8 tmp10 = tmp4 + tmp9 tmp13 = tmp11 * tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp17 * tmp18 tmp21 = tmp19 * tmp20 tmp22 = tmp16 + tmp21 tl.store(out_ptr0 + x0, tmp22, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(64)](arg0_1, arg1_1, arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, class DistMultLayerNew(nn.Module): def __init__(self): super(DistMultLayerNew, self).__init__() def predict(self, sub_emb, obj_emb, rel_emb): return torch.matmul(sub_emb * rel_emb, obj_emb.t()) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
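# A parity check, assuming a CUDA device: the fused mul+sum kernel should
# reproduce the eager computation it was compiled from.
if __name__ == '__main__':
    a, b, c = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    out = DistMultLayerNew()(a, b, c)
    assert out.shape == (4, 4, 4)  # last dimension reduced away
    assert torch.allclose(out, (a * b * c).sum(dim=-1), atol=1e-6)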
ckhui/cogdl
DistMultLayer
false
12647
[ "MIT" ]
0
93bea17c2dc7084857cd0a4af8178c174965127c
https://github.com/ckhui/cogdl/tree/93bea17c2dc7084857cd0a4af8178c174965127c
RobertaClassificationHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/xb/cxblo4rcs4c2gaeo4g2lzb4lnf2hnk52rokjaibitqw2ujbex3da.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_4 => tanh # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 8), (8, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((8, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (8, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 4), (1, 8), 0), out=buf1) del primals_2 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, primals_3, 32, grid=grid(32), stream=stream0) del primals_3 buf3 = 
empty_strided_cuda((8, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return (buf3, reinterpret_tensor(buf0, (8, 8), (8, 1), 0), buf2, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, 2) def forward(self, features, **kwargs): x = features[:, 0, :] x = x.reshape(-1, x.size(-1) * 2) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5)}]
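# A shape walk-through for the mock config above (hidden_size=4), clarifying
# why the reshape halves the row count while doubling the feature width.
if __name__ == '__main__':
    head = RobertaClassificationHead(_mock_config(hidden_size=4,
        hidden_dropout_prob=0.5))
    feats = torch.rand(4, 4, 4, 4)
    x = feats[:, 0, :]                   # first "token" slice -> (4, 4, 4)
    x = x.reshape(-1, x.size(-1) * 2)    # adjacent rows fused -> (8, 8)
    assert head(feats).shape == (8, 2)   # dense (8->4), tanh, out_proj (4->2)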
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 8), (8, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((8, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (8, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 4), (1, 8), 0), out=buf1) del primals_2 buf2 = buf1 del buf1 triton_poi_fused_tanh_1[grid(32)](buf2, primals_3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((8, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return buf3, reinterpret_tensor(buf0, (8, 8), (8, 1), 0), buf2, primals_4 class RobertaClassificationHeadNew(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, 2) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
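# An equivalence sketch, assuming a CUDA device; types.SimpleNamespace stands
# in for the config object. The traced graph contains no dropout ops, so the
# eager reference below skips dropout as well.
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=4, hidden_dropout_prob=0.5)
    head = RobertaClassificationHeadNew(cfg).cuda()
    feats = torch.rand(4, 4, 4, 4, device='cuda')
    x = feats[:, 0, :].reshape(-1, 8)
    ref = head.out_proj(torch.tanh(head.dense(x)))
    assert torch.allclose(head(feats), ref, atol=1e-6)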
kamranazmat/CodeBERT
RobertaClassificationHead
false
12648
[ "MIT" ]
0
109c1b58b96c61314a76563c6bd686bb09f86eab
https://github.com/kamranazmat/CodeBERT/tree/109c1b58b96c61314a76563c6bd686bb09f86eab
InvertibleLinearFlow
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/z3/cz3d4mvbdfvtvhu2qixqkdpw7wbfq3tqd7co4jcol2sqrohzng6n.py # Topologically Sorted Source Nodes: [num, logdet_1], Original ATen: [aten.sum, aten.mul] # Source node to ATen node mapping: # logdet_1 => mul # num => sum_1 # Graph fragment: # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_2, [1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, %sum_1), kwargs = {}) triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp7 = tmp6 * tmp4 tl.store(out_ptr1 + (x0), tmp7, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) # Topologically Sorted Source Nodes: [linalg_slogdet], Original ATen: [aten._linalg_slogdet] buf1 = torch.ops.aten._linalg_slogdet.default(primals_2) del primals_2 buf3 = buf1[1] buf4 = buf1[2] buf5 = buf1[3] del buf1 buf6 = empty_strided_cuda((4, ), (1, ), torch.float32) buf7 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [num, logdet_1], Original ATen: [aten.sum, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_mul_sum_0.run(primals_3, buf3, buf6, buf7, 4, 64, grid=grid(4), stream=stream0) del buf3 del primals_3 return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf7, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf4, buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from typing import Dict from typing import Tuple import torch.nn as nn from torch.nn import Parameter import torch.nn.functional as F class Flow(nn.Module): """ Normalizing Flow base class """ _registry = dict() def __init__(self, inverse): super(Flow, self).__init__() self.inverse = inverse def forward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def backward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def init(self, *input, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: x: Tensor The random variable before flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: y: Tensor, logdet: Tensor y, the random variable after flow logdet, the log determinant of :math:`\\partial y / \\partial x` Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet` """ if self.inverse: if init: raise RuntimeError( 'inverse flow shold be initialized with backward pass') else: return self.backward(x, *h, **kwargs) elif init: return self.init(x, *h, init_scale=init_scale, **kwargs) else: return self.forward(x, *h, **kwargs) def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: y: Tensor The random variable after flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: x: Tensor, logdet: Tensor x, the random variable before flow logdet, the log determinant of :math:`\\partial x / \\partial y` Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet` """ if self.inverse: if init: return self.init(y, *h, init_scale=init_scale, **kwargs) else: return self.forward(y, *h, **kwargs) elif init: raise RuntimeError( 'forward flow should be initialzed with forward pass') else: return self.backward(y, *h, **kwargs) @classmethod def register(cls, name: 'str'): Flow._registry[name] = cls @classmethod def by_name(cls, name: 'str'): return Flow._registry[name] @classmethod def from_params(cls, params: 'Dict'): raise NotImplementedError class InvertibleLinearFlow(Flow): def __init__(self, in_features, inverse=False): super(InvertibleLinearFlow, self).__init__(inverse) self.in_features = in_features self.weight = Parameter(torch.Tensor(in_features, in_features)) self.register_buffer('weight_inv', self.weight.data.clone()) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.weight) self.sync() def sync(self): self.weight_inv.copy_(self.weight.data.inverse()) def forward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., 
in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ dim = input.dim() out = F.linear(input, self.weight) _, logdet = torch.linalg.slogdet(self.weight) if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1) logdet = logdet * num return out, logdet def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ dim = input.dim() out = F.linear(input, self.weight_inv) _, logdet = torch.linalg.slogdet(self.weight_inv) if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1) logdet = logdet * num return out, logdet def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0 ) ->Tuple[torch.Tensor, torch.Tensor]: with torch.no_grad(): return self.forward(data) def extra_repr(self): return 'inverse={}, in_features={}'.format(self.inverse, self. in_features) @classmethod def from_params(cls, params: 'Dict') ->'InvertibleLinearFlow': return InvertibleLinearFlow(**params) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
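# A round-trip sketch, not part of the original class: backward() inverts
# forward(), and because weight_inv holds the exact inverse of weight the two
# log-determinants cancel (for the orthogonal init both are already ~0).
if __name__ == '__main__':
    flow = InvertibleLinearFlow(in_features=4)
    x = torch.rand(4, 4, 4, 4)     # [batch, N1, N2, in_features]
    mask = torch.ones(4, 4, 4)     # [batch, N1, N2]
    y, logdet_fwd = flow.forward(x, mask)
    x_rec, logdet_bwd = flow.backward(y, mask)
    assert torch.allclose(x, x_rec, atol=1e-5)
    assert torch.allclose(logdet_fwd + logdet_bwd, torch.zeros(4), atol=1e-4)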
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from typing import Dict from typing import Tuple import torch.nn as nn from torch.nn import Parameter import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp7 = tmp6 * tmp4 tl.store(out_ptr1 + x0, tmp7, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = torch.ops.aten._linalg_slogdet.default(primals_2) del primals_2 buf3 = buf1[1] buf4 = buf1[2] buf5 = buf1[3] del buf1 buf6 = empty_strided_cuda((4,), (1,), torch.float32) buf7 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](primals_3, buf3, buf6, buf7, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf3 del primals_3 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf7, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf4, buf5, buf6 class Flow(nn.Module): """ Normalizing Flow base class """ _registry = dict() def __init__(self, inverse): super(Flow, self).__init__() self.inverse = inverse def forward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def backward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def init(self, *input, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: x: Tensor The random variable before flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: y: Tensor, logdet: Tensor y, the random variable after flow 
logdet, the log determinant of :math:`\\partial y / \\partial x` Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet` """ if self.inverse: if init: raise RuntimeError( 'inverse flow shold be initialized with backward pass') else: return self.backward(x, *h, **kwargs) elif init: return self.init(x, *h, init_scale=init_scale, **kwargs) else: return self.forward(x, *h, **kwargs) def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: y: Tensor The random variable after flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: x: Tensor, logdet: Tensor x, the random variable before flow logdet, the log determinant of :math:`\\partial x / \\partial y` Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet` """ if self.inverse: if init: return self.init(y, *h, init_scale=init_scale, **kwargs) else: return self.forward(y, *h, **kwargs) elif init: raise RuntimeError( 'forward flow should be initialzed with forward pass') else: return self.backward(y, *h, **kwargs) @classmethod def register(cls, name: 'str'): Flow._registry[name] = cls @classmethod def by_name(cls, name: 'str'): return Flow._registry[name] @classmethod def from_params(cls, params: 'Dict'): raise NotImplementedError class InvertibleLinearFlowNew(Flow): def __init__(self, in_features, inverse=False): super(InvertibleLinearFlowNew, self).__init__(inverse) self.in_features = in_features self.weight = Parameter(torch.Tensor(in_features, in_features)) self.register_buffer('weight_inv', self.weight.data.clone()) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.weight) self.sync() def sync(self): self.weight_inv.copy_(self.weight.data.inverse()) def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ dim = input.dim() out = F.linear(input, self.weight_inv) _, logdet = torch.linalg.slogdet(self.weight_inv) if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1) logdet = logdet * num return out, logdet def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0 ) ->Tuple[torch.Tensor, torch.Tensor]: with torch.no_grad(): return self.forward(data) def extra_repr(self): return 'inverse={}, in_features={}'.format(self.inverse, self. in_features) @classmethod def from_params(cls, params: 'Dict') ->'InvertibleLinearFlow': return InvertibleLinearFlowNew(**params) def forward(self, input_0, input_1): primals_2 = self.weight primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
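# A parity sketch, assuming a CUDA device: the compiled wrapper should match
# the eager F.linear + slogdet formulation. Note call() asserts a 4-D mask of
# shape (4, 4, 4, 4), mirroring get_inputs() in the reference snippet above.
if __name__ == '__main__':
    flow = InvertibleLinearFlowNew(in_features=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.ones(4, 4, 4, 4, device='cuda')
    out, logdet = flow(x, mask)
    ref = F.linear(x, flow.weight)
    ref_logdet = torch.linalg.slogdet(flow.weight)[1] * mask.view(4, -1).sum(1)
    assert torch.allclose(out, ref, atol=1e-5)
    assert torch.allclose(logdet, ref_logdet, atol=1e-5)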
juheeuu/flowseq
InvertibleLinearFlow
false
12649
[ "Apache-2.0" ]
0
e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
https://github.com/juheeuu/flowseq/tree/e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
ActNormFlow
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3t/c3tegjhab4p4hqm22zwhmg7geo53lf3hgtmb6ngf5ln3g4l4akaj.py # Topologically Sorted Source Nodes: [exp, mul, out, out_1], Original ATen: [aten.exp, aten.mul, aten.add] # Source node to ATen node mapping: # exp => exp # mul => mul # out => add # out_1 => mul_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_2,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %exp), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %unsqueeze), kwargs = {}) triton_poi_fused_add_exp_mul_0 = async_compile.triton('triton_poi_fused_add_exp_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 4 x5 = (xindex // 16) x6 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x0 + (4*x5)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + (x6), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hj/chjctndcct3uy5zpfj56qun4jpqjq3jp7qekxbult5tkzhebqoca.py # Topologically Sorted Source Nodes: [logdet], Original ATen: [aten.sum] # Source node to ATen node mapping: # logdet => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_2, [0], True), kwargs = {}) triton_per_fused_sum_1 = async_compile.triton('triton_per_fused_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xw/cxwj4tauk4ppjfha7ldxck7ffhdnppggn3xisjfuxa7khdl5xvue.py # Topologically Sorted Source Nodes: [num, logdet_1], Original ATen: [aten.sum, aten.mul] # Source node to ATen node mapping: # logdet_1 => mul_2 # num => sum_2 # Graph fragment: # %sum_2 : [num_users=2] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, %sum_2), kwargs = {}) triton_per_fused_mul_sum_2 = async_compile.triton('triton_per_fused_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp7 = tmp6 * tmp4 tl.store(out_ptr1 + (x0), tmp7, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [exp, mul, out, out_1], Original ATen: [aten.exp, aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_exp_mul_0.run(primals_1, primals_2, primals_3, primals_4, buf0, 1024, grid=grid(1024), stream=stream0) del primals_3 buf1 = empty_strided_cuda((1, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [logdet], Original ATen: [aten.sum] triton_per_fused_sum_1.run(primals_2, buf1, 1, 4, grid=grid(1), stream=stream0) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) buf3 = empty_strided_cuda((4, ), (1, ), 
torch.float32) # Topologically Sorted Source Nodes: [num, logdet_1], Original ATen: [aten.sum, aten.mul] triton_per_fused_mul_sum_2.run(primals_4, buf1, buf2, buf3, 4, 64, grid=grid(4), stream=stream0) del buf1 return (buf0, buf3, primals_1, primals_2, primals_4, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from typing import Dict from typing import Tuple import torch.nn as nn from torch.nn import Parameter class Flow(nn.Module): """ Normalizing Flow base class """ _registry = dict() def __init__(self, inverse): super(Flow, self).__init__() self.inverse = inverse def forward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def backward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def init(self, *input, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: x: Tensor The random variable before flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: y: Tensor, logdet: Tensor y, the random variable after flow logdet, the log determinant of :math:`\\partial y / \\partial x` Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet` """ if self.inverse: if init: raise RuntimeError( 'inverse flow should be initialized with backward pass') else: return self.backward(x, *h, **kwargs) elif init: return self.init(x, *h, init_scale=init_scale, **kwargs) else: return self.forward(x, *h, **kwargs) def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: y: Tensor The random variable after flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: x: Tensor, logdet: Tensor x, the random variable before flow logdet, the log determinant of :math:`\\partial x / \\partial y` Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet` """ if self.inverse: if init: return self.init(y, *h, init_scale=init_scale, **kwargs) else: return self.forward(y, *h, **kwargs) elif init: raise RuntimeError( 'forward flow should be initialized with forward pass') else: return self.backward(y, *h, **kwargs) @classmethod def register(cls, name: 'str'): Flow._registry[name] = cls @classmethod def by_name(cls, name: 'str'): return Flow._registry[name] @classmethod def from_params(cls, params: 'Dict'): raise NotImplementedError class ActNormFlow(Flow): def __init__(self, in_features, inverse=False): super(ActNormFlow, self).__init__(inverse) self.in_features = in_features self.log_scale = Parameter(torch.Tensor(in_features)) self.bias = Parameter(torch.Tensor(in_features)) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.log_scale, mean=0, std=0.05) nn.init.constant_(self.bias, 0.0) def forward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output /
\\partial input` """ dim = input.dim() out = input * self.log_scale.exp() + self.bias out = out * mask.unsqueeze(dim - 1) logdet = self.log_scale.sum(dim=0, keepdim=True) if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1) logdet = logdet * num return out, logdet def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ dim = input.dim() out = (input - self.bias) * mask.unsqueeze(dim - 1) out = out.div(self.log_scale.exp() + 1e-08) logdet = self.log_scale.sum(dim=0, keepdim=True) * -1.0 if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1) logdet = logdet * num return out, logdet def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0 ) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: data: input: Tensor input tensor [batch, N1, N2, ..., in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] init_scale: float initial scale Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ with torch.no_grad(): out, _ = self.forward(data, mask) mean = out.view(-1, self.in_features).mean(dim=0) std = out.view(-1, self.in_features).std(dim=0) inv_stdv = init_scale / (std + 1e-06) self.log_scale.add_(inv_stdv.log()) self.bias.add_(-mean).mul_(inv_stdv) return self.forward(data, mask) def extra_repr(self): return 'inverse={}, in_features={}'.format(self.inverse, self. in_features) @classmethod def from_params(cls, params: 'Dict') ->'ActNormFlow': return ActNormFlow(**params) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
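For reference, the log-determinant computed in the forward pass above follows directly from the elementwise affine map: every masked position is transformed independently, so the Jacobian is diagonal. A short derivation in LaTeX, assuming a 0/1 mask with T active positions in one batch row (T is what num = mask.view(out.size(0), -1).sum(dim=1) computes):

\[
y = x \odot e^{s} + b
\quad\Longrightarrow\quad
\log\left|\det\frac{\partial y}{\partial x}\right|
= \sum_{t=1}^{T} \sum_{i=1}^{d} s_i
= T \sum_{i=1}^{d} s_i ,
\]

which is exactly logdet = self.log_scale.sum(dim=0, keepdim=True) * num in the forward pass, and its negation in the backward pass.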
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from typing import Dict from typing import Tuple import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 4 x5 = xindex // 16 x6 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x0 + 4 * x5), xmask, eviction_policy='evict_last' ) tmp2 = tl_math.exp(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_per_fused_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) @triton.jit def triton_per_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp7 = tmp6 * tmp4 tl.store(out_ptr1 + x0, tmp7, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_0[grid(1024)](primals_1, primals_2, primals_3, primals_4, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((1,), (1,), torch.float32) triton_per_fused_sum_1[grid(1)](primals_2, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mul_sum_2[grid(4)](primals_4, buf1, buf2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf1 return buf0, buf3, primals_1, primals_2, primals_4, buf2 class Flow(nn.Module): """ Normalizing Flow base class """ _registry = dict() def __init__(self, inverse): super(Flow, self).__init__() 
self.inverse = inverse def forward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def backward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def init(self, *input, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: x: Tensor The random variable before flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: y: Tensor, logdet: Tensor y, the random variable after flow logdet, the log determinant of :math:`\\partial y / \\partial x` Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet` """ if self.inverse: if init: raise RuntimeError( 'inverse flow should be initialized with backward pass') else: return self.backward(x, *h, **kwargs) elif init: return self.init(x, *h, init_scale=init_scale, **kwargs) else: return self.forward(x, *h, **kwargs) def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: y: Tensor The random variable after flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: x: Tensor, logdet: Tensor x, the random variable before flow logdet, the log determinant of :math:`\\partial x / \\partial y` Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet` """ if self.inverse: if init: return self.init(y, *h, init_scale=init_scale, **kwargs) else: return self.forward(y, *h, **kwargs) elif init: raise RuntimeError( 'forward flow should be initialized with forward pass') else: return self.backward(y, *h, **kwargs) @classmethod def register(cls, name: 'str'): Flow._registry[name] = cls @classmethod def by_name(cls, name: 'str'): return Flow._registry[name] @classmethod def from_params(cls, params: 'Dict'): raise NotImplementedError class ActNormFlowNew(Flow): def __init__(self, in_features, inverse=False): super(ActNormFlowNew, self).__init__(inverse) self.in_features = in_features self.log_scale = Parameter(torch.Tensor(in_features)) self.bias = Parameter(torch.Tensor(in_features)) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.log_scale, mean=0, std=0.05) nn.init.constant_(self.bias, 0.0) def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ dim = input.dim() out = (input - self.bias) * mask.unsqueeze(dim - 1) out = out.div(self.log_scale.exp() + 1e-08) logdet = self.log_scale.sum(dim=0, keepdim=True) * -1.0 if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1)
logdet = logdet * num return out, logdet def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0 ) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: data: input: Tensor input tensor [batch, N1, N2, ..., in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] init_scale: float initial scale Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ with torch.no_grad(): out, _ = self.forward(data, mask) mean = out.view(-1, self.in_features).mean(dim=0) std = out.view(-1, self.in_features).std(dim=0) inv_stdv = init_scale / (std + 1e-06) self.log_scale.add_(inv_stdv.log()) self.bias.add_(-mean).mul_(inv_stdv) return self.forward(data, mask) def extra_repr(self): return 'inverse={}, in_features={}'.format(self.inverse, self. in_features) @classmethod def from_params(cls, params: 'Dict') ->'ActNormFlow': return ActNormFlowNew(**params) def forward(self, input_0, input_1): primals_2 = self.log_scale primals_3 = self.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
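A minimal smoke test for the record above, assuming a CUDA device (call asserts CUDA-contiguous strides on its inputs). It feeds the same tensors through the eager ActNormFlow and the compiled call, then compares the two; torch.testing.assert_close is standard PyTorch, everything else comes from this record.

# Hypothetical check, not part of the record; assumes CUDA is available.
if torch.cuda.is_available():
    flow = ActNormFlow(in_features=4).cuda()
    x, mask = (t.cuda() for t in get_inputs())
    ref_out, ref_logdet = flow(x, mask)
    # call returns (out, logdet, saved tensors...); only the first two matter here.
    out, logdet = call([x, flow.log_scale.detach(), flow.bias.detach(), mask])[:2]
    torch.testing.assert_close(out, ref_out)
    torch.testing.assert_close(logdet, ref_logdet)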
juheeuu/flowseq
ActNormFlow
false
12,650
[ "Apache-2.0" ]
0
e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
https://github.com/juheeuu/flowseq/tree/e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
PrototypicalDecoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/q3/cq3hlsb3oxxxwpofddnblmd63fvwbw26vtzy3rdosoriaqbincal.py # Topologically Sorted Source Nodes: [dist, scores], Original ATen: [aten.stack, aten.neg] # Source node to ATen node mapping: # dist => cat # scores => neg # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sum_1, %sum_2, %sum_3, %sum_4], 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {}) triton_poi_fused_neg_stack_0 = async_compile.triton('triton_poi_fused_neg_stack_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_neg_stack_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_neg_stack_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 16 x0 = xindex % 4 
x2 = (xindex // 64) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tl.load(in_ptr0 + (16 + x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp10 = tmp9 - tmp6 tmp11 = tmp10 * tmp10 tmp12 = tmp8 + tmp11 tmp13 = tl.load(in_ptr0 + (32 + x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp14 = tmp13 - tmp6 tmp15 = tmp14 * tmp14 tmp16 = tmp12 + tmp15 tmp17 = tl.load(in_ptr0 + (48 + x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp18 = tmp17 - tmp6 tmp19 = tmp18 * tmp18 tmp20 = tmp16 + tmp19 tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype) tmp22 = tl.where(tmp4, tmp20, tmp21) tmp23 = tmp0 >= tmp3 tmp24 = tl.full([1], 8, tl.int64) tmp25 = tmp0 < tmp24 tmp26 = tmp23 & tmp25 tmp27 = tl.load(in_ptr0 + (x0 + (4*((-4) + x1)) + (64*x2)), tmp26 & xmask, other=0.0) tmp28 = tl.load(in_ptr1 + (4 + x0), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tmp27 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tl.load(in_ptr0 + (16 + x0 + (4*((-4) + x1)) + (64*x2)), tmp26 & xmask, other=0.0) tmp32 = tmp31 - tmp28 tmp33 = tmp32 * tmp32 tmp34 = tmp30 + tmp33 tmp35 = tl.load(in_ptr0 + (32 + x0 + (4*((-4) + x1)) + (64*x2)), tmp26 & xmask, other=0.0) tmp36 = tmp35 - tmp28 tmp37 = tmp36 * tmp36 tmp38 = tmp34 + tmp37 tmp39 = tl.load(in_ptr0 + (48 + x0 + (4*((-4) + x1)) + (64*x2)), tmp26 & xmask, other=0.0) tmp40 = tmp39 - tmp28 tmp41 = tmp40 * tmp40 tmp42 = tmp38 + tmp41 tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp26, tmp42, tmp43) tmp45 = tmp0 >= tmp24 tmp46 = tl.full([1], 12, tl.int64) tmp47 = tmp0 < tmp46 tmp48 = tmp45 & tmp47 tmp49 = tl.load(in_ptr0 + (x0 + (4*((-8) + x1)) + (64*x2)), tmp48 & xmask, other=0.0) tmp50 = tl.load(in_ptr1 + (8 + x0), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp51 = tmp49 - tmp50 tmp52 = tmp51 * tmp51 tmp53 = tl.load(in_ptr0 + (16 + x0 + (4*((-8) + x1)) + (64*x2)), tmp48 & xmask, other=0.0) tmp54 = tmp53 - tmp50 tmp55 = tmp54 * tmp54 tmp56 = tmp52 + tmp55 tmp57 = tl.load(in_ptr0 + (32 + x0 + (4*((-8) + x1)) + (64*x2)), tmp48 & xmask, other=0.0) tmp58 = tmp57 - tmp50 tmp59 = tmp58 * tmp58 tmp60 = tmp56 + tmp59 tmp61 = tl.load(in_ptr0 + (48 + x0 + (4*((-8) + x1)) + (64*x2)), tmp48 & xmask, other=0.0) tmp62 = tmp61 - tmp50 tmp63 = tmp62 * tmp62 tmp64 = tmp60 + tmp63 tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype) tmp66 = tl.where(tmp48, tmp64, tmp65) tmp67 = tmp0 >= tmp46 tmp68 = tl.full([1], 16, tl.int64) tmp69 = tmp0 < tmp68 tmp70 = tl.load(in_ptr0 + (x0 + (4*((-12) + x1)) + (64*x2)), tmp67 & xmask, other=0.0) tmp71 = tl.load(in_ptr1 + (12 + x0), tmp67 & xmask, eviction_policy='evict_last', other=0.0) tmp72 = tmp70 - tmp71 tmp73 = tmp72 * tmp72 tmp74 = tl.load(in_ptr0 + (16 + x0 + (4*((-12) + x1)) + (64*x2)), tmp67 & xmask, other=0.0) tmp75 = tmp74 - tmp71 tmp76 = tmp75 * tmp75 tmp77 = tmp73 + tmp76 tmp78 = tl.load(in_ptr0 + (32 + x0 + (4*((-12) + x1)) + (64*x2)), tmp67 & xmask, other=0.0) tmp79 = tmp78 - tmp71 tmp80 = tmp79 * tmp79 tmp81 = tmp77 + tmp80 tmp82 = tl.load(in_ptr0 + (48 + x0 + (4*((-12) + x1)) + (64*x2)), tmp67 & xmask, other=0.0) tmp83 = tmp82 - tmp71 tmp84 = tmp83 * tmp83 tmp85 = tmp81 + tmp84 tmp86 = tl.full(tmp85.shape, 0.0, tmp85.dtype) tmp87 = tl.where(tmp67, tmp85, tmp86) tmp88 = tl.where(tmp48, tmp66, tmp87) tmp89 = tl.where(tmp26, tmp44, 
tmp88) tmp90 = tl.where(tmp4, tmp22, tmp89) tmp91 = -tmp90 tl.store(in_out_ptr0 + (x3), tmp91, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [dist, scores], Original ATen: [aten.stack, aten.neg] stream0 = get_raw_stream(0) triton_poi_fused_neg_stack_0.run(buf1, primals_1, primals_2, 256, grid=grid(256), stream=stream0) return (buf1, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import typing from torch import Tensor from collections import Counter from typing import List from typing import Optional from typing import Union from torch.utils.data import Dataset import torch.utils.data.dataloader from torch import nn import torch.nn from torch.utils.data.dataset import Dataset from torch.utils.data import DataLoader def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) def arccosh(x): """Compute the arccosh, numerically stable.""" x = torch.clamp(x, min=1 + EPSILON) a = torch.log(x) b = torch.log1p(torch.sqrt(x * x - 1) / x) return a + b def _iter_dataset(dataset: 'Optional[Dataset]') ->typing.Iterable: if dataset is None: return [] return map(lambda x: x[0], DataLoader(dataset, batch_size=1, num_workers=0) ) def identify_dynamic_embeddings(data_point: 'DataPoint'): dynamic_embeddings = [] if isinstance(data_point, Sentence): first_token = data_point[0] for name, vector in first_token._embeddings.items(): if vector.requires_grad: dynamic_embeddings.append(name) for name, vector in data_point._embeddings.items(): if vector.requires_grad: dynamic_embeddings.append(name) return dynamic_embeddings def store_embeddings(data_points: 'Union[List[DT], Dataset]', storage_mode: 'str', dynamic_embeddings: 'Optional[List[str]]'=None): if isinstance(data_points, Dataset): data_points = list(_iter_dataset(data_points)) if storage_mode == 'none': dynamic_embeddings = None elif not dynamic_embeddings: dynamic_embeddings = identify_dynamic_embeddings(data_points[0]) for data_point in data_points: data_point.clear_embeddings(dynamic_embeddings) if storage_mode == 'cpu': str(flair.device) != 'cpu' for data_point in data_points: data_point def mdot(x, y): """Compute the inner product.""" m = x.new_ones(1, x.size(1)) m[0, 0] = -1 return torch.sum(m * x * y, 1, keepdim=True) def dist(x, y): """Get the hyperbolic distance between x and y.""" return arccosh(-mdot(x, y)) class CosineDistance(torch.nn.Module): def forward(self, a, b): return -dot_product(a, b, normalize=True) class EuclideanDistance(nn.Module): """Implement an EuclideanDistance object.""" def forward(self, mat_1: 'Tensor', mat_2: 'Tensor') ->Tensor: """Returns the squared euclidean distance between each element in mat_1 and each element in mat_2. Parameters ---------- mat_1: torch.Tensor matrix of shape (n_1, n_features) mat_2: torch.Tensor matrix of shape (n_2, n_features) Returns ------- dist: torch.Tensor distance matrix of shape (n_1, n_2) """ _dist = [torch.sum((mat_1 - mat_2[i]) ** 2, dim=1) for i in range( mat_2.size(0))] dist = torch.stack(_dist, dim=1) return dist class HyperbolicDistance(nn.Module): """Implement a HyperbolicDistance object.""" def forward(self, mat_1: 'Tensor', mat_2: 'Tensor') ->Tensor: """Returns the squared hyperbolic distance between each element in mat_1 and each element in mat_2.
Parameters ---------- mat_1: torch.Tensor matrix of shape (n_1, n_features) mat_2: torch.Tensor matrix of shape (n_2, n_features) Returns ------- dist: torch.Tensor distance matrix of shape (n_1, n_2) """ mat_1_x_0 = torch.sqrt(1 + mat_1.pow(2).sum(dim=1, keepdim=True)) mat_2_x_0 = torch.sqrt(1 + mat_2.pow(2).sum(dim=1, keepdim=True)) left = mat_1_x_0.mm(mat_2_x_0.t()) right = mat_1[:, 1:].mm(mat_2[:, 1:].t()) return arccosh(left - right).pow(2) class LogitCosineDistance(torch.nn.Module): def forward(self, a, b): return torch.logit(0.5 - 0.5 * dot_product(a, b, normalize=True)) class NegativeScaledDotProduct(torch.nn.Module): def forward(self, a, b): sqrt_d = torch.sqrt(torch.tensor(a.size(-1))) return -dot_product(a, b, normalize=False) / sqrt_d class PrototypicalDecoder(torch.nn.Module): def __init__(self, num_prototypes: 'int', embeddings_size: 'int', prototype_size: 'Optional[int]'=None, distance_function: 'str'= 'euclidean', use_radius: 'Optional[bool]'=False, min_radius: 'Optional[int]'=0, unlabeled_distance: 'Optional[float]'=None, unlabeled_idx: 'Optional[int]'=None, learning_mode: 'Optional[str]' ='joint', normal_distributed_initial_prototypes: 'bool'=False): super().__init__() if not prototype_size: prototype_size = embeddings_size self.prototype_size = prototype_size self.metric_space_decoder: 'Optional[torch.nn.Linear]' = None if prototype_size != embeddings_size: self.metric_space_decoder = torch.nn.Linear(embeddings_size, prototype_size) torch.nn.init.xavier_uniform_(self.metric_space_decoder.weight) self.prototype_vectors = torch.nn.Parameter(torch.ones( num_prototypes, prototype_size), requires_grad=True) if normal_distributed_initial_prototypes: self.prototype_vectors = torch.nn.Parameter(torch.normal(torch. zeros(num_prototypes, prototype_size))) self.prototype_radii: 'Optional[torch.nn.Parameter]' = None if use_radius: self.prototype_radii = torch.nn.Parameter(torch.ones( num_prototypes), requires_grad=True) self.min_radius = min_radius self.learning_mode = learning_mode assert (unlabeled_idx is None) == (unlabeled_distance is None ), "'unlabeled_idx' and 'unlabeled_distance' should either both be set or both not be set." 
self.unlabeled_idx = unlabeled_idx self.unlabeled_distance = unlabeled_distance self._distance_function = distance_function self.distance: 'Optional[torch.nn.Module]' = None if distance_function.lower() == 'hyperbolic': self.distance = HyperbolicDistance() elif distance_function.lower() == 'cosine': self.distance = CosineDistance() elif distance_function.lower() == 'logit_cosine': self.distance = LogitCosineDistance() elif distance_function.lower() == 'euclidean': self.distance = EuclideanDistance() elif distance_function.lower() == 'dot_product': self.distance = NegativeScaledDotProduct() else: raise KeyError(f'Distance function {distance_function} not found.') self @property def num_prototypes(self): return self.prototype_vectors.size(0) def forward(self, embedded): if self.learning_mode == 'learn_only_map_and_prototypes': embedded = embedded.detach() if self.metric_space_decoder is not None: encoded = self.metric_space_decoder(embedded) else: encoded = embedded prot = self.prototype_vectors radii = self.prototype_radii if self.learning_mode == 'learn_only_prototypes': encoded = encoded.detach() if self.learning_mode == 'learn_only_embeddings_and_map': prot = prot.detach() if radii is not None: radii = radii.detach() distance = self.distance(encoded, prot) if radii is not None: distance /= self.min_radius + torch.nn.functional.softplus(radii) if self.unlabeled_distance: distance[..., self.unlabeled_idx] = self.unlabeled_distance scores = -distance return scores def enable_expectation_maximization(self, data: 'FlairDataset', encoder: 'DefaultClassifier', exempt_labels: 'List[str]'=[], mini_batch_size: 'int'=8): """Applies a monkey-patch to the train method (which sets the train flag). This allows for computation of average prototypes after a training sequence.""" decoder = self unpatched_train = encoder.train def patched_train(mode: 'bool'=True): unpatched_train(mode=mode) if mode: logger.info('recalculating prototypes') with torch.no_grad(): decoder.calculate_prototypes(data=data, encoder=encoder, exempt_labels=exempt_labels, mini_batch_size= mini_batch_size) encoder.train = patched_train def calculate_prototypes(self, data: 'FlairDataset', encoder: 'DefaultClassifier', exempt_labels: 'List[str]'=[], mini_batch_size=32 ): """ Function that calculates a prototype for each class based on the euclidean average embedding over the whole dataset :param data: dataset for which to calculate prototypes :param encoder: encoder to use :param exempt_labels: labels to exclude :param mini_batch_size: number of sentences to embed at the same time :return: """ with torch.no_grad(): dataloader = DataLoader(data, batch_size=mini_batch_size) new_prototypes = torch.zeros(self.num_prototypes, self.
prototype_size, device=flair.device) counter: 'Counter' = Counter() for batch in tqdm(dataloader): logits, labels = encoder.forward_pass(batch) if len(labels) > 0: if self.metric_space_decoder is not None: logits = self.metric_space_decoder(logits) for logit, label in zip(logits, labels): counter.update(label) idx = encoder.label_dictionary.get_idx_for_item(label [0]) new_prototypes[idx] += logit store_embeddings(batch, storage_mode='none') for label, count in counter.most_common(): average_prototype = new_prototypes[encoder.label_dictionary .get_idx_for_item(label)] / count new_prototypes[encoder.label_dictionary.get_idx_for_item(label) ] = average_prototype for label in exempt_labels: label_idx = encoder.label_dictionary.get_idx_for_item(label) new_prototypes[label_idx] = self.prototype_vectors[label_idx] self.prototype_vectors.data = new_prototypes def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_prototypes': 4, 'embeddings_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import typing from torch import Tensor from collections import Counter from typing import List from typing import Optional from typing import Union from torch.utils.data import Dataset import torch.utils.data.dataloader from torch import nn import torch.nn from torch.utils.data.dataset import Dataset from torch.utils.data import DataLoader assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_neg_stack_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 16 x0 = xindex % 4 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp10 = tmp9 - tmp6 tmp11 = tmp10 * tmp10 tmp12 = tmp8 + tmp11 tmp13 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp14 = tmp13 - tmp6 tmp15 = tmp14 * tmp14 tmp16 = tmp12 + tmp15 tmp17 = tl.load(in_ptr0 + (48 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp18 = tmp17 - tmp6 tmp19 = tmp18 * tmp18 tmp20 = tmp16 + tmp19 tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype) tmp22 = tl.where(tmp4, tmp20, tmp21) tmp23 = tmp0 >= tmp3 tmp24 = tl.full([1], 8, tl.int64) tmp25 = tmp0 < tmp24 tmp26 = tmp23 & tmp25 tmp27 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 64 * x2), tmp26 & xmask, other=0.0) tmp28 = tl.load(in_ptr1 + (4 + x0), tmp26 & xmask, eviction_policy= 'evict_last', other=0.0) tmp29 = tmp27 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp26 & xmask, other=0.0) tmp32 = tmp31 - tmp28 tmp33 = tmp32 * tmp32 tmp34 = tmp30 + tmp33 tmp35 = tl.load(in_ptr0 + (32 + x0 + 4 * (-4 + x1) + 64 * x2), tmp26 & xmask, other=0.0) tmp36 = tmp35 - tmp28 tmp37 = tmp36 * tmp36 tmp38 = tmp34 + tmp37 tmp39 = tl.load(in_ptr0 + (48 + x0 + 4 * (-4 + x1) + 64 * x2), tmp26 & xmask, other=0.0) tmp40 = tmp39 - tmp28 tmp41 = tmp40 * tmp40 tmp42 = tmp38 + tmp41 tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp26, tmp42, tmp43) tmp45 = tmp0 >= tmp24 tmp46 = tl.full([1], 12, tl.int64) tmp47 = tmp0 < tmp46 tmp48 = tmp45 & tmp47 tmp49 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 64 * x2), tmp48 & xmask, other=0.0) tmp50 = tl.load(in_ptr1 + (8 + x0), tmp48 & xmask, eviction_policy= 'evict_last', other=0.0) tmp51 = tmp49 - tmp50 tmp52 = tmp51 * tmp51 tmp53 = tl.load(in_ptr0 + (16 + x0 + 4 * (-8 + x1) + 64 * x2), tmp48 & xmask, other=0.0) tmp54 = tmp53 - tmp50 tmp55 = tmp54 * tmp54 tmp56 = tmp52 + tmp55 tmp57 = tl.load(in_ptr0 + (32 + x0 + 4 * (-8 + x1) + 64 * x2), tmp48 & xmask, other=0.0) tmp58 = tmp57 - tmp50 tmp59 = tmp58 * tmp58 tmp60 = tmp56 + tmp59 tmp61 = tl.load(in_ptr0 + (48 + x0 + 4 * (-8 + x1) + 64 * x2), tmp48 & xmask, other=0.0) tmp62 = tmp61 - tmp50 tmp63 = tmp62 * tmp62 tmp64 = tmp60 + tmp63 tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype) tmp66 = tl.where(tmp48, tmp64, tmp65) 
tmp67 = tmp0 >= tmp46 tl.full([1], 16, tl.int64) tmp70 = tl.load(in_ptr0 + (x0 + 4 * (-12 + x1) + 64 * x2), tmp67 & xmask, other=0.0) tmp71 = tl.load(in_ptr1 + (12 + x0), tmp67 & xmask, eviction_policy= 'evict_last', other=0.0) tmp72 = tmp70 - tmp71 tmp73 = tmp72 * tmp72 tmp74 = tl.load(in_ptr0 + (16 + x0 + 4 * (-12 + x1) + 64 * x2), tmp67 & xmask, other=0.0) tmp75 = tmp74 - tmp71 tmp76 = tmp75 * tmp75 tmp77 = tmp73 + tmp76 tmp78 = tl.load(in_ptr0 + (32 + x0 + 4 * (-12 + x1) + 64 * x2), tmp67 & xmask, other=0.0) tmp79 = tmp78 - tmp71 tmp80 = tmp79 * tmp79 tmp81 = tmp77 + tmp80 tmp82 = tl.load(in_ptr0 + (48 + x0 + 4 * (-12 + x1) + 64 * x2), tmp67 & xmask, other=0.0) tmp83 = tmp82 - tmp71 tmp84 = tmp83 * tmp83 tmp85 = tmp81 + tmp84 tmp86 = tl.full(tmp85.shape, 0.0, tmp85.dtype) tmp87 = tl.where(tmp67, tmp85, tmp86) tmp88 = tl.where(tmp48, tmp66, tmp87) tmp89 = tl.where(tmp26, tmp44, tmp88) tmp90 = tl.where(tmp4, tmp22, tmp89) tmp91 = -tmp90 tl.store(in_out_ptr0 + x3, tmp91, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_neg_stack_0[grid(256)](buf1, primals_1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, primals_1, primals_2 def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) def arccosh(x): """Compute the arcosh, numerically stable.""" x = torch.clamp(x, min=1 + EPSILON) a = torch.log(x) b = torch.log1p(torch.sqrt(x * x - 1) / x) return a + b def _iter_dataset(dataset: 'Optional[Dataset]') ->typing.Iterable: if dataset is None: return [] return map(lambda x: x[0], DataLoader(dataset, batch_size=1, num_workers=0) ) def identify_dynamic_embeddings(data_point: 'DataPoint'): dynamic_embeddings = [] if isinstance(data_point, Sentence): first_token = data_point[0] for name, vector in first_token._embeddings.items(): if vector.requires_grad: dynamic_embeddings.append(name) for name, vector in data_point._embeddings.items(): if vector.requires_grad: dynamic_embeddings.append(name) return dynamic_embeddings def store_embeddings(data_points: 'Union[List[DT], Dataset]', storage_mode: 'str', dynamic_embeddings: 'Optional[List[str]]'=None): if isinstance(data_points, Dataset): data_points = list(_iter_dataset(data_points)) if storage_mode == 'none': dynamic_embeddings = None elif not dynamic_embeddings: dynamic_embeddings = identify_dynamic_embeddings(data_points[0]) for data_point in data_points: data_point.clear_embeddings(dynamic_embeddings) if storage_mode == 'cpu': str(flair.device) != 'cpu' for data_point in data_points: data_point def mdot(x, y): """Compute the inner product.""" m = x.new_ones(1, x.size(1)) m[0, 0] = -1 return torch.sum(m * x * y, 1, keepdim=True) def dist(x, y): """Get the hyperbolic distance between x and y.""" return arccosh(-mdot(x, y)) class CosineDistance(torch.nn.Module): def 
forward(self, a, b): return -dot_product(a, b, normalize=True) class EuclideanDistance(nn.Module): """Implement an EuclideanDistance object.""" def forward(self, mat_1: 'Tensor', mat_2: 'Tensor') ->Tensor: """Returns the squared euclidean distance between each element in mat_1 and each element in mat_2. Parameters ---------- mat_1: torch.Tensor matrix of shape (n_1, n_features) mat_2: torch.Tensor matrix of shape (n_2, n_features) Returns ------- dist: torch.Tensor distance matrix of shape (n_1, n_2) """ _dist = [torch.sum((mat_1 - mat_2[i]) ** 2, dim=1) for i in range( mat_2.size(0))] dist = torch.stack(_dist, dim=1) return dist class HyperbolicDistance(nn.Module): """Implement a HyperbolicDistance object.""" def forward(self, mat_1: 'Tensor', mat_2: 'Tensor') ->Tensor: """Returns the squared hyperbolic distance between each element in mat_1 and each element in mat_2. Parameters ---------- mat_1: torch.Tensor matrix of shape (n_1, n_features) mat_2: torch.Tensor matrix of shape (n_2, n_features) Returns ------- dist: torch.Tensor distance matrix of shape (n_1, n_2) """ mat_1_x_0 = torch.sqrt(1 + mat_1.pow(2).sum(dim=1, keepdim=True)) mat_2_x_0 = torch.sqrt(1 + mat_2.pow(2).sum(dim=1, keepdim=True)) left = mat_1_x_0.mm(mat_2_x_0.t()) right = mat_1[:, 1:].mm(mat_2[:, 1:].t()) return arccosh(left - right).pow(2) class LogitCosineDistance(torch.nn.Module): def forward(self, a, b): return torch.logit(0.5 - 0.5 * dot_product(a, b, normalize=True)) class NegativeScaledDotProduct(torch.nn.Module): def forward(self, a, b): sqrt_d = torch.sqrt(torch.tensor(a.size(-1))) return -dot_product(a, b, normalize=False) / sqrt_d class PrototypicalDecoderNew(torch.nn.Module): def __init__(self, num_prototypes: 'int', embeddings_size: 'int', prototype_size: 'Optional[int]'=None, distance_function: 'str'= 'euclidean', use_radius: 'Optional[bool]'=False, min_radius: 'Optional[int]'=0, unlabeled_distance: 'Optional[float]'=None, unlabeled_idx: 'Optional[int]'=None, learning_mode: 'Optional[str]' ='joint', normal_distributed_initial_prototypes: 'bool'=False): super().__init__() if not prototype_size: prototype_size = embeddings_size self.prototype_size = prototype_size self.metric_space_decoder: 'Optional[torch.nn.Linear]' = None if prototype_size != embeddings_size: self.metric_space_decoder = torch.nn.Linear(embeddings_size, prototype_size) torch.nn.init.xavier_uniform_(self.metric_space_decoder.weight) self.prototype_vectors = torch.nn.Parameter(torch.ones( num_prototypes, prototype_size), requires_grad=True) if normal_distributed_initial_prototypes: self.prototype_vectors = torch.nn.Parameter(torch.normal(torch. zeros(num_prototypes, prototype_size))) self.prototype_radii: 'Optional[torch.nn.Parameter]' = None if use_radius: self.prototype_radii = torch.nn.Parameter(torch.ones( num_prototypes), requires_grad=True) self.min_radius = min_radius self.learning_mode = learning_mode assert (unlabeled_idx is None) == (unlabeled_distance is None ), "'unlabeled_idx' and 'unlabeled_distance' should either both be set or both not be set."
self.unlabeled_idx = unlabeled_idx self.unlabeled_distance = unlabeled_distance self._distance_function = distance_function self.distance: 'Optional[torch.nn.Module]' = None if distance_function.lower() == 'hyperbolic': self.distance = HyperbolicDistance() elif distance_function.lower() == 'cosine': self.distance = CosineDistance() elif distance_function.lower() == 'logit_cosine': self.distance = LogitCosineDistance() elif distance_function.lower() == 'euclidean': self.distance = EuclideanDistance() elif distance_function.lower() == 'dot_product': self.distance = NegativeScaledDotProduct() else: raise KeyError(f'Distance function {distance_function} not found.') self @property def num_prototypes(self): return self.prototype_vectors.size(0) def enable_expectation_maximization(self, data: 'FlairDataset', encoder: 'DefaultClassifier', exempt_labels: 'List[str]'=[], mini_batch_size: 'int'=8): """Applies a monkey-patch to the train method (which sets the train flag). This allows for computation of average prototypes after a training sequence.""" decoder = self unpatched_train = encoder.train def patched_train(mode: 'bool'=True): unpatched_train(mode=mode) if mode: logger.info('recalculating prototypes') with torch.no_grad(): decoder.calculate_prototypes(data=data, encoder=encoder, exempt_labels=exempt_labels, mini_batch_size= mini_batch_size) encoder.train = patched_train def calculate_prototypes(self, data: 'FlairDataset', encoder: 'DefaultClassifier', exempt_labels: 'List[str]'=[], mini_batch_size=32 ): """ Function that calculates a prototype for each class based on the euclidean average embedding over the whole dataset :param data: dataset for which to calculate prototypes :param encoder: encoder to use :param exempt_labels: labels to exclude :param mini_batch_size: number of sentences to embed at the same time :return: """ with torch.no_grad(): dataloader = DataLoader(data, batch_size=mini_batch_size) new_prototypes = torch.zeros(self.num_prototypes, self. prototype_size, device=flair.device) counter: 'Counter' = Counter() for batch in tqdm(dataloader): logits, labels = encoder.forward_pass(batch) if len(labels) > 0: if self.metric_space_decoder is not None: logits = self.metric_space_decoder(logits) for logit, label in zip(logits, labels): counter.update(label) idx = encoder.label_dictionary.get_idx_for_item(label [0]) new_prototypes[idx] += logit store_embeddings(batch, storage_mode='none') for label, count in counter.most_common(): average_prototype = new_prototypes[encoder.label_dictionary .get_idx_for_item(label)] / count new_prototypes[encoder.label_dictionary.get_idx_for_item(label) ] = average_prototype for label in exempt_labels: label_idx = encoder.label_dictionary.get_idx_for_item(label) new_prototypes[label_idx] = self.prototype_vectors[label_idx] self.prototype_vectors.data = new_prototypes def forward(self, input_0): primals_2 = self.prototype_vectors primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
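The same kind of check applies to this record. Note that the captured module code references names from its original repository (logger, tqdm, flair.device) without importing them; the sketch below only exercises the forward path, which never touches those names. A minimal sketch, assuming a CUDA device:

# Hypothetical check, not part of the record; assumes CUDA is available.
if torch.cuda.is_available():
    dec = PrototypicalDecoder(num_prototypes=4, embeddings_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = dec(x)  # eager path: -EuclideanDistance()(x, prototype_vectors)
    out = call([x, dec.prototype_vectors.detach()])[0]
    torch.testing.assert_close(out, ref)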
k2room/ParaphraseQA
PrototypicalDecoder
false
12,651
[ "MIT" ]
0
5aebe02c26a0bac3731f18bb115b33ba3a772756
https://github.com/k2room/ParaphraseQA/tree/5aebe02c26a0bac3731f18bb115b33ba3a772756
TwoLinearsModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/2h/c2hxg45skvy6hnampidmbrudkadykbjbsftxsrksoez35npxavq6.py # Topologically Sorted Source Nodes: [h_relu], Original ATen: [aten.clamp, aten.ge] # Source node to ATen node mapping: # h_relu => clamp_min # Graph fragment: # %add_tensor : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_tensor, 0), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_tensor, 0), kwargs = {}) triton_poi_fused_clamp_ge_0 = async_compile.triton('triton_poi_fused_clamp_ge_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 64), (64, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 4), (1, 64), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [h_relu], Original ATen: [aten.clamp, aten.ge] stream0 = get_raw_stream(0) triton_poi_fused_clamp_ge_0.run(buf0, primals_3, buf1, buf3, 16, grid=grid(16), stream=stream0) del primals_3 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (buf2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf1, primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 class TwoLinearsModel(nn.Module): def __init__(self, per_sample_shape: 'list', hidden_size: 'int', output_size: 'int'): super(TwoLinearsModel, self).__init__() assert len(per_sample_shape) == 3 self.per_sample_shape = per_sample_shape input_size = per_sample_shape[0] for dim in per_sample_shape[1:]: input_size *= dim self.linear1 = nn.Linear(input_size, hidden_size) self.linear2 = nn.Linear(hidden_size, output_size) def forward(self, x: 'torch.Tensor'): batch_size = x.size(0) x = x.view(batch_size, -1) h_relu = self.linear1(x).clamp(min=0) y_pred = self.linear2(h_relu) return y_pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'per_sample_shape': [4, 4, 4], 'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 64), (64, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (1, 64), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clamp_ge_0[grid(16)](buf0, primals_3, buf1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = buf0 del buf0 extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), buf1, primals_4, buf3 class TwoLinearsModelNew(nn.Module): def __init__(self, per_sample_shape: 'list', hidden_size: 'int', output_size: 'int'): super(TwoLinearsModelNew, self).__init__() assert len(per_sample_shape) == 3 self.per_sample_shape = per_sample_shape input_size = per_sample_shape[0] for dim in per_sample_shape[1:]: input_size *= dim self.linear1 = nn.Linear(input_size, hidden_size) self.linear2 = nn.Linear(hidden_size, output_size) def forward(self, input_0): primals_2 = self.linear1.weight primals_3 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
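As with the previous records, the compiled wrapper can be checked against the eager module. One detail worth noting: the boolean buffer buf3 written by triton_poi_fused_clamp_ge_0 is the ReLU mask (h >= 0) that Inductor saves for the backward pass of clamp_min; it is not part of the user-visible output. A minimal sketch, assuming a CUDA device:

# Hypothetical check, not part of the record; assumes CUDA is available.
if torch.cuda.is_available():
    eager = TwoLinearsModel([4, 4, 4], hidden_size=4, output_size=4).cuda()
    compiled = TwoLinearsModelNew([4, 4, 4], hidden_size=4, output_size=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # align the two random inits
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(compiled(x), eager(x))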
arjunsuresh/aimet
TwoLinearsModel
false
12,652
[ "BSD-3-Clause" ]
0
f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
https://github.com/arjunsuresh/aimet/tree/f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/la/clalnn5iz2syotwgvds5fjb6mtcklh5yizks6zdxu552jin7zbwe.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_0 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/dg/cdgi57gzwpeejdwbzdsfg4uihbr2hunpoyspa5gj2rdusiuxnqhs.py # Topologically Sorted Source Nodes: [x_4, l0], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # l0 => gt_2, mul_2, where_2 # x_4 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_8, %primals_9, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %convolution_2), kwargs = {}) # %where_2 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_1 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), 
xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/l2/cl26qlqbg4ashxwthfa7y7w2lu5hks2tixgyjr7r7uwzp4cso4h4.py # Topologically Sorted Source Nodes: [x_5, h0, sub], Original ATen: [aten.convolution, aten._prelu_kernel, aten.sub] # Source node to ATen node mapping: # h0 => gt_3, mul_3, where_3 # sub => sub # x_5 => convolution_3 # Graph fragment: # %convolution_3 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_11, %primals_12, [2, 2], [2, 2], [1, 1], True, [0, 0], 1), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %convolution_3), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_3, %where_1), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_sub_2 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = 
tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6u/c6upo47sukvb6r3ezcbrtiu4k23o3ipxybjgvcbho3hh7vlgzgss.py # Topologically Sorted Source Nodes: [x_6, l1, x_7], Original ATen: [aten.convolution, aten._prelu_kernel, aten.add] # Source node to ATen node mapping: # l1 => gt_4, mul_4, where_4 # x_6 => convolution_4 # x_7 => add # Graph fragment: # %convolution_4 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%sub, %primals_14, %primals_15, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %convolution_4), kwargs = {}) # %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_4, %where_2), kwargs = {}) triton_poi_fused__prelu_kernel_add_convolution_3 = async_compile.triton('triton_poi_fused__prelu_kernel_add_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_add_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 + tmp9 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/3x/c3xyjskkjups25u3j3yhj4hlfs2qrwtmbtvecyuaysvh4x3jm6tn.py # Topologically Sorted Source Nodes: [x_9, l0_1, sub_1], Original ATen: [aten.convolution, aten._prelu_kernel, aten.sub] # Source node to ATen node mapping: # l0_1 => gt_6, mul_6, where_6 # sub_1 => sub_1 # x_9 => convolution_6 # Graph fragment: # %convolution_6 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_5, %primals_20, %primals_21, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, %convolution_6), kwargs = {}) # %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_6), kwargs = {}) # %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_6, %add), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_sub_4 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_sub_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_sub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/l6/cl6hfaxvr7gbbjmd7ss5mpea4jfxmddqefs22ndohfkcmqyg7oev.py # Topologically Sorted Source Nodes: [x_10, h1, x_11], Original ATen: [aten.convolution, aten._prelu_kernel, 
aten.add] # Source node to ATen node mapping: # h1 => gt_7, mul_7, where_7 # x_10 => convolution_7 # x_11 => add_1 # Graph fragment: # %convolution_7 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%sub_1, %primals_23, %primals_24, [2, 2], [2, 2], [1, 1], True, [0, 0], 1), kwargs = {}) # %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_7, 0), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %convolution_7), kwargs = {}) # %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %convolution_7, %mul_7), kwargs = {}) # %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_7, %where_5), kwargs = {}) triton_poi_fused__prelu_kernel_add_convolution_5 = async_compile.triton('triton_poi_fused__prelu_kernel_add_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_add_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_add_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 + tmp9 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/me/cmekfe7l6ph2yyraier4zgu6oqvnrkly54r6gxogod3onxbjbh7d.py # Topologically Sorted Source Nodes: [x_22, x_23], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # x_22 => convolution_15 # x_23 => gt_15, mul_15, where_15 # Graph fragment: # %convolution_15 : [num_users=4] = 
call_function[target=torch.ops.aten.convolution.default](args = (%where_14, %primals_47, %primals_48, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) # %gt_15 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_15, 0), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, %convolution_15), kwargs = {}) # %where_15 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_15, %convolution_15, %mul_15), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_6 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) 
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (1, ), (1, )) assert_size_stride(primals_11, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (1, ), (1, )) assert_size_stride(primals_14, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (1, ), (1, )) assert_size_stride(primals_17, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_18, (4, ), (1, )) assert_size_stride(primals_19, (1, ), (1, )) assert_size_stride(primals_20, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_21, (4, ), (1, )) assert_size_stride(primals_22, (1, ), (1, )) assert_size_stride(primals_23, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_24, (4, ), (1, )) assert_size_stride(primals_25, (1, ), (1, )) assert_size_stride(primals_26, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_27, (4, ), (1, )) assert_size_stride(primals_28, (1, ), (1, )) assert_size_stride(primals_29, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_30, (4, ), (1, )) assert_size_stride(primals_31, (1, ), (1, )) assert_size_stride(primals_32, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_33, (4, ), (1, )) assert_size_stride(primals_34, (1, ), (1, )) assert_size_stride(primals_35, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_36, (4, ), (1, )) assert_size_stride(primals_37, (1, ), (1, )) assert_size_stride(primals_38, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_39, (4, ), (1, )) assert_size_stride(primals_40, (1, ), (1, )) assert_size_stride(primals_41, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_42, (4, ), (1, )) assert_size_stride(primals_43, (1, ), (1, )) assert_size_stride(primals_44, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_45, (4, ), (1, )) assert_size_stride(primals_46, (1, ), (1, )) assert_size_stride(primals_47, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_48, (4, ), (1, )) assert_size_stride(primals_49, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten._prelu_kernel] stream0 = get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0.run(buf1, primals_2, primals_4, buf2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: 
[aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf4, primals_6, primals_7, buf5, 256, grid=grid(256), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 2, 2), (16, 4, 2, 1)) buf7 = buf6; del buf6 # reuse buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4, l0], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_1.run(buf7, primals_9, primals_10, buf8, 64, grid=grid(64), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, primals_11, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1)) buf10 = buf9; del buf9 # reuse buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5, h0, sub], Original ATen: [aten.convolution, aten._prelu_kernel, aten.sub] triton_poi_fused__prelu_kernel_convolution_sub_2.run(buf10, primals_12, primals_13, buf5, buf11, 256, grid=grid(256), stream=stream0) del primals_12 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_14, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 2, 2), (16, 4, 2, 1)) buf13 = buf12; del buf12 # reuse buf14 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6, l1, x_7], Original ATen: [aten.convolution, aten._prelu_kernel, aten.add] triton_poi_fused__prelu_kernel_add_convolution_3.run(buf13, primals_15, primals_16, buf8, buf14, 64, grid=grid(64), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf14, primals_17, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 4, 4, 4), (64, 16, 4, 1)) buf16 = buf15; del buf15 # reuse buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8, h0_1], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf16, primals_18, primals_19, buf17, 256, grid=grid(256), stream=stream0) del primals_18 # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf17, primals_20, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 4, 2, 2), (16, 4, 2, 1)) buf19 = buf18; del buf18 # reuse buf20 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9, l0_1, sub_1], Original ATen: [aten.convolution, aten._prelu_kernel, aten.sub] triton_poi_fused__prelu_kernel_convolution_sub_4.run(buf19, primals_21, primals_22, buf14, buf20, 64, grid=grid(64), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf21 = 
extern_kernels.convolution(buf20, primals_23, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 4, 4, 4), (64, 16, 4, 1)) buf22 = buf21; del buf21 # reuse buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_10, h1, x_11], Original ATen: [aten.convolution, aten._prelu_kernel, aten.add] triton_poi_fused__prelu_kernel_add_convolution_5.run(buf22, primals_24, primals_25, buf17, buf23, 256, grid=grid(256), stream=stream0) del primals_24 # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_26, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 4, 2, 2), (16, 4, 2, 1)) buf25 = buf24; del buf24 # reuse buf26 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_12, l0_2], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_1.run(buf25, primals_27, primals_28, buf26, 64, grid=grid(64), stream=stream0) del primals_27 # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf26, primals_29, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 4, 4, 4), (64, 16, 4, 1)) buf28 = buf27; del buf27 # reuse buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_13, h0_2, sub_2], Original ATen: [aten.convolution, aten._prelu_kernel, aten.sub] triton_poi_fused__prelu_kernel_convolution_sub_2.run(buf28, primals_30, primals_31, buf23, buf29, 256, grid=grid(256), stream=stream0) del primals_30 # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_32, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 4, 2, 2), (16, 4, 2, 1)) buf31 = buf30; del buf30 # reuse buf32 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_14, l1_1, x_15], Original ATen: [aten.convolution, aten._prelu_kernel, aten.add] triton_poi_fused__prelu_kernel_add_convolution_3.run(buf31, primals_33, primals_34, buf26, buf32, 64, grid=grid(64), stream=stream0) del primals_33 # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution] buf33 = extern_kernels.convolution(buf32, primals_35, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 4, 4, 4), (64, 16, 4, 1)) buf34 = buf33; del buf33 # reuse buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_16, h0_3], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf34, primals_36, primals_37, buf35, 256, grid=grid(256), stream=stream0) del primals_36 # Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, primals_38, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 4, 2, 2), (16, 4, 2, 1)) buf37 = buf36; del buf36 # reuse 
buf38 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_17, l0_3, sub_3], Original ATen: [aten.convolution, aten._prelu_kernel, aten.sub] triton_poi_fused__prelu_kernel_convolution_sub_4.run(buf37, primals_39, primals_40, buf32, buf38, 64, grid=grid(64), stream=stream0) del primals_39 # Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution] buf39 = extern_kernels.convolution(buf38, primals_41, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 4, 4, 4), (64, 16, 4, 1)) buf40 = buf39; del buf39 # reuse buf41 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_18, h1_1, x_19], Original ATen: [aten.convolution, aten._prelu_kernel, aten.add] triton_poi_fused__prelu_kernel_add_convolution_5.run(buf40, primals_42, primals_43, buf35, buf41, 256, grid=grid(256), stream=stream0) del primals_42 # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf41, primals_44, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 4, 4, 4), (64, 16, 4, 1)) buf43 = buf42; del buf42 # reuse buf44 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf43, primals_45, primals_46, buf44, 256, grid=grid(256), stream=stream0) del primals_45 # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution] buf45 = extern_kernels.convolution(buf44, primals_47, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 4, 8, 8), (256, 64, 8, 1)) buf46 = buf45; del buf45 # reuse buf47 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_22, x_23], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_6.run(buf46, primals_48, primals_49, buf47, 1024, grid=grid(1024), stream=stream0) del primals_48 return (buf47, primals_1, primals_3, primals_4, primals_5, primals_7, primals_8, primals_10, primals_11, primals_13, primals_14, primals_16, primals_17, primals_19, primals_20, primals_22, primals_23, primals_25, primals_26, primals_28, primals_29, primals_31, primals_32, primals_34, primals_35, primals_37, primals_38, primals_40, primals_41, primals_43, primals_44, primals_46, primals_47, primals_49, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19, buf20, buf22, buf23, buf25, buf26, buf28, buf29, buf31, buf32, buf34, buf35, buf37, buf38, buf40, buf41, buf43, buf44, buf46, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((4, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_48 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_49 
= rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
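All the fused kernels in the listing above share one elementwise tail: add the per-channel convolution bias, apply PReLU with a single shared slope, and, in the *_sub_* / *_add_* variants, subtract or add a residual tensor. A minimal PyTorch sketch of that pattern, useful as a reference check against the Triton kernels; the function name and argument handling are illustrative assumptions, not part of the generated code:

import torch

def fused_bias_prelu_reference(conv_out, bias, slope, skip=None, op=None):
    # Mirrors triton_poi_fused__prelu_kernel_convolution_*: the kernel gets
    # the bias-free convolution result (in_out_ptr0), adds the per-channel
    # bias (in_ptr0), then applies PReLU with one shared slope (in_ptr1,
    # i.e. nn.PReLU() with num_parameters=1).
    x = conv_out + bias.view(1, -1, 1, 1)
    y = torch.where(x > 0, x, slope * x)
    if op == 'sub':    # *_convolution_sub_* kernels (e.g. the h0 - x branch)
        y = y - skip
    elif op == 'add':  # *_add_convolution_* kernels (e.g. the l1 + l0 branch)
        y = y + skip
    return y

Note that each kernel also stores the biased convolution result back through in_out_ptr0, so the pre-activation tensor (buf1, buf4, ...) stays available among the returned buffers for the backward pass.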
import torch import torch.nn as nn class ConvBlock(nn.Module): def __init__(self, in_size, out_size, kernel=3, stride=1, padding=1, activ='relu', norm=None): super(ConvBlock, self).__init__() self.conv = nn.Conv2d(in_size, out_size, kernel, stride, padding) self.norm = norm self.activ = activ if self.norm == 'batch': self.bn = nn.BatchNorm2d(out_size) elif self.norm == 'instance': self.bn = nn.InstanceNorm2d(out_size) if self.activ == 'relu': self.act = nn.ReLU(True) elif self.activ == 'prelu': self.act = nn.PReLU() elif self.activ == 'lrelu': self.act = nn.LeakyReLU(0.2, True) elif self.activ == 'tanh': self.act = nn.Tanh() def forward(self, x): if self.norm is not None: x = self.bn(self.conv(x)) else: x = self.conv(x) if self.activ is not None: return self.act(x) else: return x class DeconvBlock(nn.Module): def __init__(self, in_size, out_size, kernel=4, stride=2, padding=1, activ='relu', norm=None): super(DeconvBlock, self).__init__() self.deconv = nn.ConvTranspose2d(in_size, out_size, kernel, stride, padding) self.norm = norm self.activ = activ if self.norm == 'batch': self.bn = nn.BatchNorm2d(out_size) elif self.norm == 'instance': self.bn = nn.InstanceNorm2d(out_size) if self.activ == 'relu': self.act = nn.ReLU(True) elif self.activ == 'prelu': self.act = nn.PReLU() elif self.activ == 'lrelu': self.act = nn.LeakyReLU(0.2, True) elif self.activ == 'tanh': self.act = nn.Tanh() def forward(self, x): if self.norm is not None: x = self.bn(self.deconv(x)) else: x = self.deconv(x) if self.activ is not None: return self.act(x) else: return x class DownBlock(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, activation='prelu'): super(DownBlock, self).__init__() self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) def forward(self, x): l0 = self.down_conv1(x) h0 = self.down_conv2(l0) l1 = self.down_conv3(h0 - x) return l1 + l0 class UpBlock(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, activation='prelu'): super(UpBlock, self).__init__() self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) def forward(self, x): h0 = self.up_conv1(x) l0 = self.up_conv2(h0) h1 = self.up_conv3(l0 - x) return h1 + h0 class Net(nn.Module): def __init__(self, channels, filters, features, scale_fact): super(Net, self).__init__() self.lay1 = ConvBlock(in_size=channels, out_size=features, kernel=3, stride=1, padding=1, activ='prelu') self.lay2 = ConvBlock(in_size=features, out_size=filters, kernel=1, stride=1, padding=0, activ='prelu') self.down1 = DownBlock(num_filter=filters, kernel_size=6, stride=2, padding=2, activation='prelu') self.up1 = UpBlock(num_filter=filters, kernel_size=6, stride=2, padding=2, activation='prelu') self.down2 = DownBlock(num_filter=filters, kernel_size=6, stride=2, padding=2, activation='prelu') self.up2 = UpBlock(num_filter=filters, kernel_size=6, stride=2, padding=2, activation='prelu') self.out1 = DeconvBlock(in_size=filters, out_size=features, kernel= 1, stride=1, padding=0, activ='prelu') self.out2 = DeconvBlock(in_size=features, out_size=channels, kernel =2, 
stride=2, padding=0, activ='prelu') def forward(self, x): x = self.lay1(x) x = self.lay2(x) x = self.down1(x) x = self.up1(x) x = self.down2(x) x = self.up2(x) x = self.out1(x) x = self.out2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'filters': 4, 'features': 4, 'scale_fact': 1.0} ]
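A quick eager-mode smoke test using the shapes that get_inputs() and get_init_inputs() describe; the expected output shape follows from the final stride-2 ConvTranspose2d in out2 and matches buf47 in the compiled graph. The snippet is a sketch, not part of the original harness:

import torch

net = Net(channels=4, filters=4, features=4, scale_fact=1.0)
x = torch.rand(4, 4, 4, 4)
y = net(x)
# out2 is a ConvTranspose2d with kernel=2, stride=2, padding=0, so the
# spatial size doubles: (4 - 1) * 2 + 2 = 8.
print(y.shape)  # torch.Size([4, 4, 8, 8])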
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__prelu_kernel_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 + tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_sub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = 
tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__prelu_kernel_add_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 + tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (1,), (1,)) assert_size_stride(primals_11, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (1,), (1,)) assert_size_stride(primals_14, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (1,), (1,)) assert_size_stride(primals_17, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (1,), (1,)) assert_size_stride(primals_20, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (1,), (1,)) assert_size_stride(primals_23, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_24, (4,), (1,)) assert_size_stride(primals_25, (1,), (1,)) assert_size_stride(primals_26, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_27, (4,), (1,)) assert_size_stride(primals_28, 
(1,), (1,)) assert_size_stride(primals_29, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_30, (4,), (1,)) assert_size_stride(primals_31, (1,), (1,)) assert_size_stride(primals_32, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_33, (4,), (1,)) assert_size_stride(primals_34, (1,), (1,)) assert_size_stride(primals_35, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_36, (4,), (1,)) assert_size_stride(primals_37, (1,), (1,)) assert_size_stride(primals_38, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_39, (4,), (1,)) assert_size_stride(primals_40, (1,), (1,)) assert_size_stride(primals_41, (4, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_42, (4,), (1,)) assert_size_stride(primals_43, (1,), (1,)) assert_size_stride(primals_44, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_45, (4,), (1,)) assert_size_stride(primals_46, (1,), (1,)) assert_size_stride(primals_47, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_48, (4,), (1,)) assert_size_stride(primals_49, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf4, primals_6, primals_7, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 2, 2), (16, 4, 2, 1)) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_1[grid(64)](buf7, primals_9, primals_10, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf9 = extern_kernels.convolution(buf8, primals_11, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1)) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_sub_2[grid(256)](buf10, primals_12, primals_13, buf5, buf11, 256, XBLOCK=256, num_warps =4, num_stages=1) del primals_12 buf12 = extern_kernels.convolution(buf11, primals_14, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 2, 2), (16, 4, 2, 1)) buf13 = buf12 del buf12 buf14 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused__prelu_kernel_add_convolution_3[grid(64)](buf13, primals_15, primals_16, buf8, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_15 buf15 = extern_kernels.convolution(buf14, primals_17, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 4, 4, 4), (64, 16, 4, 1)) buf16 = buf15 del buf15 buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf16, primals_18, primals_19, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_18 buf18 = extern_kernels.convolution(buf17, primals_20, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 4, 2, 2), (16, 4, 2, 1)) buf19 = buf18 del buf18 buf20 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_sub_4[grid(64)](buf19, primals_21, primals_22, buf14, buf20, 64, XBLOCK=64, num_warps= 1, num_stages=1) del primals_21 buf21 = extern_kernels.convolution(buf20, primals_23, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 4, 4, 4), (64, 16, 4, 1)) buf22 = buf21 del buf21 buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_add_convolution_5[grid(256)](buf22, primals_24, primals_25, buf17, buf23, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_24 buf24 = extern_kernels.convolution(buf23, primals_26, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 4, 2, 2), (16, 4, 2, 1)) buf25 = buf24 del buf24 buf26 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_1[grid(64)](buf25, primals_27, primals_28, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_27 buf27 = extern_kernels.convolution(buf26, primals_29, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 4, 4, 4), (64, 16, 4, 1)) buf28 = buf27 del buf27 buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_sub_2[grid(256)](buf28, primals_30, primals_31, buf23, buf29, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_30 buf30 = extern_kernels.convolution(buf29, primals_32, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 4, 2, 2), (16, 4, 2, 1)) buf31 = buf30 del buf30 buf32 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused__prelu_kernel_add_convolution_3[grid(64)](buf31, primals_33, primals_34, buf26, buf32, 64, XBLOCK=64, num_warps= 1, num_stages=1) del primals_33 buf33 = extern_kernels.convolution(buf32, primals_35, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 4, 4, 4), (64, 16, 4, 1)) buf34 = buf33 del buf33 buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf34, primals_36, primals_37, buf35, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_36 buf36 = extern_kernels.convolution(buf35, primals_38, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 4, 2, 2), (16, 4, 2, 1)) buf37 = buf36 del buf36 buf38 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) 
triton_poi_fused__prelu_kernel_convolution_sub_4[grid(64)](buf37, primals_39, primals_40, buf32, buf38, 64, XBLOCK=64, num_warps= 1, num_stages=1) del primals_39 buf39 = extern_kernels.convolution(buf38, primals_41, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 4, 4, 4), (64, 16, 4, 1)) buf40 = buf39 del buf39 buf41 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_add_convolution_5[grid(256)](buf40, primals_42, primals_43, buf35, buf41, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_42 buf42 = extern_kernels.convolution(buf41, primals_44, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 4, 4, 4), (64, 16, 4, 1)) buf43 = buf42 del buf42 buf44 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf43, primals_45, primals_46, buf44, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_45 buf45 = extern_kernels.convolution(buf44, primals_47, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 4, 8, 8), (256, 64, 8, 1)) buf46 = buf45 del buf45 buf47 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32 ) triton_poi_fused__prelu_kernel_convolution_6[grid(1024)](buf46, primals_48, primals_49, buf47, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_48 return (buf47, primals_1, primals_3, primals_4, primals_5, primals_7, primals_8, primals_10, primals_11, primals_13, primals_14, primals_16, primals_17, primals_19, primals_20, primals_22, primals_23, primals_25, primals_26, primals_28, primals_29, primals_31, primals_32, primals_34, primals_35, primals_37, primals_38, primals_40, primals_41, primals_43, primals_44, primals_46, primals_47, primals_49, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19, buf20, buf22, buf23, buf25, buf26, buf28, buf29, buf31, buf32, buf34, buf35, buf37, buf38, buf40, buf41, buf43, buf44, buf46) class ConvBlock(nn.Module): def __init__(self, in_size, out_size, kernel=3, stride=1, padding=1, activ='relu', norm=None): super(ConvBlock, self).__init__() self.conv = nn.Conv2d(in_size, out_size, kernel, stride, padding) self.norm = norm self.activ = activ if self.norm == 'batch': self.bn = nn.BatchNorm2d(out_size) elif self.norm == 'instance': self.bn = nn.InstanceNorm2d(out_size) if self.activ == 'relu': self.act = nn.ReLU(True) elif self.activ == 'prelu': self.act = nn.PReLU() elif self.activ == 'lrelu': self.act = nn.LeakyReLU(0.2, True) elif self.activ == 'tanh': self.act = nn.Tanh() def forward(self, x): if self.norm is not None: x = self.bn(self.conv(x)) else: x = self.conv(x) if self.activ is not None: return self.act(x) else: return x class DeconvBlock(nn.Module): def __init__(self, in_size, out_size, kernel=4, stride=2, padding=1, activ='relu', norm=None): super(DeconvBlock, self).__init__() self.deconv = nn.ConvTranspose2d(in_size, out_size, kernel, stride, padding) self.norm = norm self.activ = activ if self.norm == 'batch': self.bn = nn.BatchNorm2d(out_size) elif self.norm == 'instance': self.bn = nn.InstanceNorm2d(out_size) if self.activ == 'relu': self.act = nn.ReLU(True) elif self.activ == 'prelu': self.act = nn.PReLU() elif self.activ == 'lrelu': self.act = nn.LeakyReLU(0.2, True) elif self.activ == 'tanh': self.act 
= nn.Tanh() def forward(self, x): if self.norm is not None: x = self.bn(self.deconv(x)) else: x = self.deconv(x) if self.activ is not None: return self.act(x) else: return x class DownBlock(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, activation='prelu'): super(DownBlock, self).__init__() self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) def forward(self, x): l0 = self.down_conv1(x) h0 = self.down_conv2(l0) l1 = self.down_conv3(h0 - x) return l1 + l0 class UpBlock(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, activation='prelu'): super(UpBlock, self).__init__() self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation) def forward(self, x): h0 = self.up_conv1(x) l0 = self.up_conv2(h0) h1 = self.up_conv3(l0 - x) return h1 + h0 class NetNew(nn.Module): def __init__(self, channels, filters, features, scale_fact): super(NetNew, self).__init__() self.lay1 = ConvBlock(in_size=channels, out_size=features, kernel=3, stride=1, padding=1, activ='prelu') self.lay2 = ConvBlock(in_size=features, out_size=filters, kernel=1, stride=1, padding=0, activ='prelu') self.down1 = DownBlock(num_filter=filters, kernel_size=6, stride=2, padding=2, activation='prelu') self.up1 = UpBlock(num_filter=filters, kernel_size=6, stride=2, padding=2, activation='prelu') self.down2 = DownBlock(num_filter=filters, kernel_size=6, stride=2, padding=2, activation='prelu') self.up2 = UpBlock(num_filter=filters, kernel_size=6, stride=2, padding=2, activation='prelu') self.out1 = DeconvBlock(in_size=filters, out_size=features, kernel= 1, stride=1, padding=0, activ='prelu') self.out2 = DeconvBlock(in_size=features, out_size=channels, kernel =2, stride=2, padding=0, activ='prelu') def forward(self, input_0): primals_1 = self.lay1.conv.weight primals_2 = self.lay1.conv.bias primals_4 = self.lay1.act.weight primals_5 = self.lay2.conv.weight primals_6 = self.lay2.conv.bias primals_7 = self.lay2.act.weight primals_8 = self.down1.down_conv1.conv.weight primals_9 = self.down1.down_conv1.conv.bias primals_10 = self.down1.down_conv1.act.weight primals_11 = self.down1.down_conv2.deconv.weight primals_12 = self.down1.down_conv2.deconv.bias primals_13 = self.down1.down_conv2.act.weight primals_14 = self.down1.down_conv3.conv.weight primals_15 = self.down1.down_conv3.conv.bias primals_16 = self.down1.down_conv3.act.weight primals_17 = self.up1.up_conv1.deconv.weight primals_18 = self.up1.up_conv1.deconv.bias primals_19 = self.up1.up_conv1.act.weight primals_20 = self.up1.up_conv2.conv.weight primals_21 = self.up1.up_conv2.conv.bias primals_22 = self.up1.up_conv2.act.weight primals_23 = self.up1.up_conv3.deconv.weight primals_24 = self.up1.up_conv3.deconv.bias primals_25 = self.up1.up_conv3.act.weight primals_26 = self.down2.down_conv1.conv.weight primals_27 = self.down2.down_conv1.conv.bias primals_28 = self.down2.down_conv1.act.weight primals_29 = self.down2.down_conv2.deconv.weight primals_30 = self.down2.down_conv2.deconv.bias primals_31 = self.down2.down_conv2.act.weight primals_32 = self.down2.down_conv3.conv.weight 
primals_33 = self.down2.down_conv3.conv.bias primals_34 = self.down2.down_conv3.act.weight primals_35 = self.up2.up_conv1.deconv.weight primals_36 = self.up2.up_conv1.deconv.bias primals_37 = self.up2.up_conv1.act.weight primals_38 = self.up2.up_conv2.conv.weight primals_39 = self.up2.up_conv2.conv.bias primals_40 = self.up2.up_conv2.act.weight primals_41 = self.up2.up_conv3.deconv.weight primals_42 = self.up2.up_conv3.deconv.bias primals_43 = self.up2.up_conv3.act.weight primals_44 = self.out1.deconv.weight primals_45 = self.out1.deconv.bias primals_46 = self.out1.act.weight primals_47 = self.out2.deconv.weight primals_48 = self.out2.deconv.bias primals_49 = self.out2.act.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49]) return output[0]
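# A minimal usage sketch for the NetNew wrapper above (not part of the captured
# record). channels=4, filters=4 and features=4 are assumptions inferred from the
# (4, 4, 4, 4) shapes asserted inside call(); scale_fact is stored by the original
# Net but never used by the traced graph, so its value below is arbitrary.
if __name__ == '__main__':
    net = NetNew(channels=4, filters=4, features=4, scale_fact=2).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = net(x)  # routes through call() and the fused Triton kernels
    print(y.shape)  # torch.Size([4, 4, 8, 8]): the final stride-2 deconv upsamples 4x4 -> 8x8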
jth1011/ECE539-Project
Net
false
12653
[ "MIT" ]
0
bce6ffd75da92e862d8fda3852be247602b1567e
https://github.com/jth1011/ECE539-Project/tree/bce6ffd75da92e862d8fda3852be247602b1567e
ColorJitterLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/k5/ck5gtelstphbmeyxj47cxu7yycqfyjvkdcsi2irtvkcdext2kdfh.py # Topologically Sorted Source Nodes: [hsv], Original ATen: [aten.stack] # Source node to ATen node mapping: # hsv => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%div, %sub_3, %getitem], 1), kwargs = {}) triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 12 x0 = xindex % 4 x2 = (xindex // 48) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 + x0 + (4*x1) + (64*x2)), 
tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr0 + (32 + x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 1.7320508075688772 tmp9 = tmp7 * tmp8 tmp10 = tl.load(in_ptr0 + (x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp11 = 2.0 tmp12 = tmp10 * tmp11 tmp13 = tmp12 - tmp5 tmp14 = tmp13 - tmp6 tmp15 = libdevice.atan2(tmp9, tmp14) tmp16 = 6.283185307179586 tmp17 = tmp15 % tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = tmp17 != tmp18 tmp20 = libdevice.signbit(tmp17) if (tmp17).dtype is tl.float32 else tmp17 < 0 tmp21 = libdevice.signbit(tmp16) if (tmp16).dtype is tl.float32 else tmp16 < 0 tmp22 = tmp20 != tmp21 tmp23 = tmp19 & tmp22 tmp24 = tmp17 + tmp16 tmp25 = tl.where(tmp23, tmp24, tmp17) tmp26 = 0.15915494309189535 tmp27 = tmp25 * tmp26 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp4, tmp27, tmp28) tmp30 = tmp0 >= tmp3 tmp31 = tl.full([1], 8, tl.int64) tmp32 = tmp0 < tmp31 tmp33 = tmp30 & tmp32 tmp34 = tl.load(in_ptr0 + (x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0) tmp35 = tl.load(in_ptr0 + (16 + x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0) tmp36 = triton_helpers.minimum(tmp34, tmp35) tmp37 = tl.load(in_ptr0 + (32 + x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0) tmp38 = triton_helpers.minimum(tmp36, tmp37) tmp39 = tl.load(in_ptr0 + (48 + x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0) tmp40 = triton_helpers.minimum(tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp34, tmp35) tmp42 = triton_helpers.maximum(tmp41, tmp37) tmp43 = triton_helpers.maximum(tmp42, tmp39) tmp44 = 1e-08 tmp45 = tmp43 + tmp44 tmp46 = tmp40 / tmp45 tmp47 = 1.0 tmp48 = tmp47 - tmp46 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp33, tmp48, tmp49) tmp51 = tmp0 >= tmp31 tmp52 = tl.full([1], 12, tl.int64) tmp53 = tmp0 < tmp52 tmp54 = tl.load(in_ptr0 + (x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0) tmp55 = tl.load(in_ptr0 + (16 + x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0) tmp56 = triton_helpers.maximum(tmp54, tmp55) tmp57 = tl.load(in_ptr0 + (32 + x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0) tmp58 = triton_helpers.maximum(tmp56, tmp57) tmp59 = tl.load(in_ptr0 + (48 + x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0) tmp60 = triton_helpers.maximum(tmp58, tmp59) tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype) tmp62 = tl.where(tmp51, tmp60, tmp61) tmp63 = tl.where(tmp33, tmp50, tmp62) tmp64 = tl.where(tmp4, tmp29, tmp63) tl.store(out_ptr0 + (x3), tmp64, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gg/cggxgz5kv3f6v57dm57mpxkp77xbxjw2mh4mscq7a4rphyiqpca4.py # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # setitem => full_default_3, index_put # Graph fragment: # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put : [num_users=4] = call_function[target=torch.ops.aten.index_put_.default](args = (%view, [%bitwise_not], %full_default_3), kwargs = {}) triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 == tmp0 tmp2 = tl_math.abs(tmp0) tmp3 = float("inf") tmp4 = tmp2 != tmp3 tmp5 = tmp1 & tmp4 tmp6 = tmp5 == 0 tmp7 = 0.0 tmp8 = tl.where(tmp6, tmp7, tmp0) tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/tw/ctw7vqtn4wrdd6k3eojcn6wy2cqgcazcuexsxhfewjvf3xjv6ole.py # Topologically Sorted Source Nodes: [f_h], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # f_h => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 1], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_2 = async_compile.triton('triton_poi_fused_new_zeros_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_2(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/v4/cv46aopjpsjvzqshfrlrag6y4ifo6nlt2huynabmbhvbs437klh2.py # Topologically Sorted Source Nodes: [f_s], Original ATen: [aten.new_ones] # Source node to ATen node mapping: # f_s => full_default_1 # Graph fragment: # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_ones_3 = async_compile.triton('triton_poi_fused_new_ones_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_ones_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_ones_3(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 1.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mq/cmqhtrxejw46pbgdo6fh4f7w3qkoe46hoelc5we53jnftjhog4ar.py # Topologically Sorted Source Nodes: [mul_2, truediv_2, h_1, h_2, setitem_1, mul_3, setitem_2], Original ATen: [aten.mul, aten.div, aten.add, aten.remainder, aten.copy] # Source node to ATen node mapping: # h_1 => add_1 # h_2 => remainder_1 # mul_2 => mul_3 # mul_3 => mul_4 # setitem_1 => copy # setitem_2 => copy_1 # truediv_2 => div_2 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%uniform, 255.0), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, 360.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_4, %div_2), kwargs = {}) # %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%select_int, %add_1, 2, 0, 9223372036854775807), kwargs = {}) # %select_scatter_default : [num_users=4] = 
call_function[target=torch.ops.aten.select_scatter.default](args = (%index_put, %slice_scatter_default, 1, 0), kwargs = {}) # %remainder_1 : [num_users=1] = call_function[target=torch.ops.aten.remainder.Scalar](args = (%select_6, 1), kwargs = {}) # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_8, %remainder_1), kwargs = {}) # %select_scatter_default_1 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %copy, 1, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_12, %uniform_1), kwargs = {}) # %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_14, %mul_4), kwargs = {}) # %select_scatter_default_2 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %copy_1, 1, 1), kwargs = {}) triton_poi_fused_add_copy_div_mul_remainder_4 = async_compile.triton('triton_poi_fused_add_copy_div_mul_remainder_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_copy_div_mul_remainder_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_copy_div_mul_remainder_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 3 x0 = xindex % 16 x2 = (xindex // 48) x3 = xindex tmp6 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (16 + x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (x3), xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp1 == tmp3 tmp5 = tmp3 == tmp3 tmp8 = 255.0 tmp9 = tmp7 * tmp8 tmp10 = 0.002777777777777778 tmp11 = tmp9 * tmp10 tmp12 = tmp6 + tmp11 tmp13 = tl.where(tmp5, tmp12, tmp6) tmp14 = 1.0 tmp15 = tmp13 % tmp14 tmp16 = tmp15 != tmp3 tmp17 = libdevice.signbit(tmp15) if (tmp15).dtype is tl.float32 else 
tmp15 < 0 tmp18 = libdevice.signbit(tmp14) if (tmp14).dtype is tl.float32 else tmp14 < 0 tmp19 = tmp17 != tmp18 tmp20 = tmp16 & tmp19 tmp21 = tmp15 + tmp14 tmp22 = tl.where(tmp20, tmp21, tmp15) tmp24 = tl.where(tmp4, tmp12, tmp23) tmp25 = tl.where(tmp4, tmp22, tmp24) tmp27 = tmp25 * tmp26 tmp28 = tmp0 == tmp3 tmp30 = tl.where(tmp28, tmp12, tmp29) tmp31 = tl.where(tmp28, tmp22, tmp30) tmp32 = tl.where(tmp2, tmp27, tmp31) tl.store(out_ptr0 + (x3), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zq/czq45ck4e3bfpgidquurvbk77tkggknyyrok7iefanwqczot3sts.py # Topologically Sorted Source Nodes: [mul_4, setitem_3, x, v, s, c, h_3, mul_6, add_1, k, sub_4, t, t_1, mul_7, x_1, means, sub_6, mul_8, x_2, inputs], Original ATen: [aten.mul, aten.copy, aten.clamp, aten.index, aten.add, aten.remainder, aten.rsub, aten.minimum, aten.sub, aten.mean] # Source node to ATen node mapping: # add_1 => add_2 # c => mul_6 # h_3 => index # inputs => clamp_max_2, clamp_min_2 # k => remainder_2 # means => mean # mul_4 => mul_5 # mul_6 => mul_7 # mul_7 => mul_8 # mul_8 => mul_9 # s => index_1 # setitem_3 => copy_2 # sub_4 => sub_4 # sub_6 => sub_6 # t => minimum # t_1 => clamp_max_1, clamp_min_1 # v => index_2 # x => clamp_max, clamp_min # x_1 => sub_5 # x_2 => add_3 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_18, %uniform_2), kwargs = {}) # %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_20, %mul_5), kwargs = {}) # %select_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_2, %copy_2, 1, 2), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%select_scatter_default_3, 0), kwargs = {}) # %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {}) # %index_2 : [num_users=2] = call_function[target=torch.ops.aten.index.Tensor](args = (%clamp_max, [None, %full_default_6]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%clamp_max, [None, %full_default_5]), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_2, %index_1), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%clamp_max, [None, %full_default_4]), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, 6), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_7), kwargs = {}) # %remainder_2 : [num_users=2] = call_function[target=torch.ops.aten.remainder.Scalar](args = (%add_2, 6), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4.0, %remainder_2), kwargs = {}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%remainder_2, %sub_4), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%minimum, 0), kwargs = {}) # %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %clamp_max_1), kwargs = {}) # %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(%index_2, %mul_8), kwargs = {}) # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_5, [2, 3], True), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_5, %mean), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %uniform_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mean), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_3, 0), kwargs = {}) # %clamp_max_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1), kwargs = {}) triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5 = async_compile.triton('triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5(in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 12 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 3 r2 = rindex x1 = (xindex // 3) x3 = xindex tmp13 = tl.load(in_ptr0 + (32 + r2 + (48*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (r2 + (48*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp42 = tl.load(in_ptr0 + (16 + r2 + (48*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1, 1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1, 1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 3.0 tmp6 = 1.0 tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = 5.0 tmp9 = tl.where(tmp2, tmp8, tmp7) tmp10 = 
tl.full([1, 1], 0, tl.int32) tmp11 = tl.full([1, 1], 2, tl.int32) tmp12 = tmp10 == tmp11 tmp15 = tmp13 * tmp14 tmp17 = tl.where(tmp12, tmp15, tmp16) tmp18 = 0.0 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = triton_helpers.minimum(tmp19, tmp6) tmp21 = 6.0 tmp22 = tmp20 * tmp21 tmp23 = tmp9 + tmp22 tmp24 = tmp23 % tmp21 tmp25 = tmp24 != tmp10 tmp26 = libdevice.signbit(tmp24) if (tmp24).dtype is tl.float32 else tmp24 < 0 tmp27 = libdevice.signbit(tmp21) if (tmp21).dtype is tl.float32 else tmp21 < 0 tmp28 = tmp26 != tmp27 tmp29 = tmp25 & tmp28 tmp30 = tmp24 + tmp21 tmp31 = tl.where(tmp29, tmp30, tmp24) tmp32 = 4.0 tmp33 = tmp32 - tmp31 tmp34 = triton_helpers.minimum(tmp31, tmp33) tmp35 = triton_helpers.maximum(tmp34, tmp18) tmp36 = tmp11 == tmp11 tmp37 = tl.where(tmp36, tmp15, tmp13) tmp38 = triton_helpers.maximum(tmp37, tmp18) tmp39 = triton_helpers.minimum(tmp38, tmp6) tmp40 = tl.full([1, 1], 1, tl.int32) tmp41 = tmp40 == tmp11 tmp43 = tl.where(tmp41, tmp15, tmp42) tmp44 = triton_helpers.maximum(tmp43, tmp18) tmp45 = triton_helpers.minimum(tmp44, tmp6) tmp46 = tmp39 * tmp45 tmp47 = triton_helpers.minimum(tmp35, tmp6) tmp48 = tmp46 * tmp47 tmp49 = tmp39 - tmp48 tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp52 = tl.where(xmask, tmp50, 0) tmp53 = tl.sum(tmp52, 1)[:, None] tmp54 = 16.0 tmp55 = tmp53 / tmp54 tmp56 = tmp49 - tmp55 tmp58 = tmp56 * tmp57 tmp59 = tmp58 + tmp55 tmp60 = triton_helpers.maximum(tmp59, tmp18) tmp61 = triton_helpers.minimum(tmp60, tmp6) tl.store(out_ptr1 + (r2 + (16*x3)), tmp61, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12, 4), (48, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hsv], Original ATen: [aten.stack] stream0 = get_raw_stream(0) triton_poi_fused_stack_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0) del arg0_1 # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] triton_poi_fused_index_put_lift_fresh_1.run(buf0, buf0, 192, grid=grid(192), stream=stream0) buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_h], Original ATen: [aten.new_zeros] triton_poi_fused_new_zeros_2.run(buf7, 4, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [f_h, uniform_], Original ATen: [aten.new_zeros, aten.uniform] buf8 = torch.ops.aten.uniform.default(buf7, -4.0, 4.0) buf9 = buf8 del buf8 buf10 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [f_s], Original ATen: [aten.new_ones] triton_poi_fused_new_ones_3.run(buf10, 4, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [f_s, f_s_1], Original ATen: [aten.new_ones, aten.uniform] buf11 = torch.ops.aten.uniform.default(buf10, 0.0, 5.0) del buf10 buf12 = buf11 del buf11 buf13 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, truediv_2, h_1, h_2, setitem_1, mul_3, setitem_2], Original ATen: [aten.mul, aten.div, aten.add, aten.remainder, aten.copy] triton_poi_fused_add_copy_div_mul_remainder_4.run(buf0, buf9, buf12, buf13, 192, grid=grid(192), stream=stream0) del buf12 buf14 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [f_v], Original ATen: [aten.new_ones] triton_poi_fused_new_ones_3.run(buf14, 4, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [f_v, 
f_v_1], Original ATen: [aten.new_ones, aten.uniform] buf15 = torch.ops.aten.uniform.default(buf14, 0.0, 5.0) buf16 = buf15 del buf15 buf20 = reinterpret_tensor(buf14, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf14 # reuse # Topologically Sorted Source Nodes: [factor], Original ATen: [aten.uniform] buf21 = torch.ops.aten.uniform.default(buf20, 0.0, 5.0) del buf20 buf22 = buf21 del buf21 buf23 = reinterpret_tensor(buf0, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [mul_4, setitem_3, x, v, s, c, h_3, mul_6, add_1, k, sub_4, t, t_1, mul_7, x_1, means, sub_6, mul_8, x_2, inputs], Original ATen: [aten.mul, aten.copy, aten.clamp, aten.index, aten.add, aten.remainder, aten.rsub, aten.minimum, aten.sub, aten.mean] triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5.run(buf13, buf16, buf22, buf23, 12, 16, grid=grid(12), stream=stream0) del buf13 del buf16 del buf22 return (buf23, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
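# Note: the compiled graph above draws fresh random factors (f_h, f_s, f_v and the
# contrast factor) through torch.ops.aten.uniform on every invocation, so call() is
# deliberately non-deterministic. A minimal sketch for a repeatable run, assuming
# only that torch.manual_seed drives those draws (it seeds the default CUDA
# generator that aten.uniform uses); seeded_call is a hypothetical helper name.
def seeded_call(x, seed=0):
    torch.manual_seed(seed)  # fixes the uniform_ draws inside call()
    return call([x])[0]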
from torch.autograd import Function import math import numbers import torch import numpy as np import torch.nn as nn import torch.utils.cpp_extension def hsv2rgb(hsv): """Convert a 4-d HSV tensor to the RGB counterpart. >>> %timeit hsv2rgb_lookup(hsv) 2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) >>> %timeit hsv2rgb(rgb) 298 µs ± 542 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> torch.allclose(hsv2rgb(hsv), hsv2rgb_lookup(hsv), atol=1e-6) True References [1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative """ h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]] c = v * s n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1) k = (n + h * 6) % 6 t = torch.min(k, 4.0 - k) t = torch.clamp(t, 0, 1) return v - c * t def rgb2hsv(rgb): """Convert a 4-d RGB tensor to the HSV counterpart. Here, we compute hue using atan2() based on the definition in [1], instead of using the common lookup table approach as in [2, 3]. Those values agree when the angle is a multiple of 30°, otherwise they may differ at most ~1.2°. >>> %timeit rgb2hsv_lookup(rgb) 1.07 ms ± 2.96 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> %timeit rgb2hsv(rgb) 380 µs ± 555 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> (rgb2hsv_lookup(rgb) - rgb2hsv(rgb)).abs().max() tensor(0.0031, device='cuda:0') References [1] https://en.wikipedia.org/wiki/Hue [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html [3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212 """ r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :] Cmax = rgb.max(1)[0] Cmin = rgb.min(1)[0] hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b) hue = hue % (2 * math.pi) / (2 * math.pi) saturate = 1 - Cmin / (Cmax + 1e-08) value = Cmax hsv = torch.stack([hue, saturate, value], dim=1) hsv[~torch.isfinite(hsv)] = 0.0 return hsv class RandomHSVFunction(Function): @staticmethod def forward(ctx, x, f_h, f_s, f_v): x = rgb2hsv(x) h = x[:, 0, :, :] h += f_h * 255.0 / 360.0 h = h % 1 x[:, 0, :, :] = h x[:, 1, :, :] = x[:, 1, :, :] * f_s x[:, 2, :, :] = x[:, 2, :, :] * f_v x = torch.clamp(x, 0, 1) x = hsv2rgb(x) return x @staticmethod def backward(ctx, grad_output): grad_input = None if ctx.needs_input_grad[0]: grad_input = grad_output.clone() return grad_input, None, None, None class ColorJitterLayer(nn.Module): def __init__(self, brightness, contrast, saturation, hue): super(ColorJitterLayer, self).__init__() self.brightness = self._check_input(brightness, 'brightness') self.contrast = self._check_input(contrast, 'contrast') self.saturation = self._check_input(saturation, 'saturation') self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5 ), clip_first_on_zero=False) def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): if isinstance(value, numbers.Number): if value < 0: raise ValueError( 'If {} is a single number, it must be non-negative.'. format(name)) value = [center - value, center + value] if clip_first_on_zero: value[0] = max(value[0], 0) elif isinstance(value, (tuple, list)) and len(value) == 2: if not bound[0] <= value[0] <= value[1] <= bound[1]: raise ValueError('{} values should be between {}'.format( name, bound)) else: raise TypeError( '{} should be a single number or a list/tuple with length 2.'
.format(name)) if value[0] == value[1] == center: value = None return value def adjust_contrast(self, x): if self.contrast: factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast) means = torch.mean(x, dim=[2, 3], keepdim=True) x = (x - means) * factor + means return torch.clamp(x, 0, 1) def adjust_hsv(self, x): f_h = x.new_zeros(x.size(0), 1, 1) f_s = x.new_ones(x.size(0), 1, 1) f_v = x.new_ones(x.size(0), 1, 1) if self.hue: f_h.uniform_(*self.hue) if self.saturation: f_s = f_s.uniform_(*self.saturation) if self.brightness: f_v = f_v.uniform_(*self.brightness) return RandomHSVFunction.apply(x, f_h, f_s, f_v) def transform(self, inputs): if np.random.rand() > 0.5: transforms = [self.adjust_contrast, self.adjust_hsv] else: transforms = [self.adjust_hsv, self.adjust_contrast] for t in transforms: inputs = t(inputs) return inputs def forward(self, inputs): return self.transform(inputs) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'brightness': 4, 'contrast': 4, 'saturation': 4, 'hue': 4}]
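# A minimal sketch of exercising the eager module above via the record's own
# helpers (get_inputs/get_init_inputs); nothing here is CUDA-specific. Note that
# rgb2hsv stacks exactly three planes (hue, saturation, value), so the 4-channel
# test input comes back as a 3-channel tensor; the fourth channel only
# participates in the Cmax/Cmin reductions.
layer = ColorJitterLayer(*get_init_inputs()[0], **get_init_inputs()[1])
inp, = get_inputs()
out = layer(inp)  # stochastic: jitter factors and transform order are sampled per call
print(out.shape)  # torch.Size([4, 3, 4, 4]), values clamped to [0, 1]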
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.autograd import Function import math import numbers import numpy as np import torch.nn as nn import torch.utils.cpp_extension assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 12 x0 = xindex % 4 x2 = xindex // 48 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 1.7320508075688772 tmp9 = tmp7 * tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp11 = 2.0 tmp12 = tmp10 * tmp11 tmp13 = tmp12 - tmp5 tmp14 = tmp13 - tmp6 tmp15 = libdevice.atan2(tmp9, tmp14) tmp16 = 6.283185307179586 tmp17 = tmp15 % tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = tmp17 != tmp18 tmp20 = libdevice.signbit(tmp17 ) if tmp17.dtype is tl.float32 else tmp17 < 0 tmp21 = libdevice.signbit(tmp16 ) if tmp16.dtype is tl.float32 else tmp16 < 0 tmp22 = tmp20 != tmp21 tmp23 = tmp19 & tmp22 tmp24 = tmp17 + tmp16 tmp25 = tl.where(tmp23, tmp24, tmp17) tmp26 = 0.15915494309189535 tmp27 = tmp25 * tmp26 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp4, tmp27, tmp28) tmp30 = tmp0 >= tmp3 tmp31 = tl.full([1], 8, tl.int64) tmp32 = tmp0 < tmp31 tmp33 = tmp30 & tmp32 tmp34 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 64 * x2), tmp33 & xmask, other=0.0) tmp35 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 & xmask, other=0.0) tmp36 = triton_helpers.minimum(tmp34, tmp35) tmp37 = tl.load(in_ptr0 + (32 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 & xmask, other=0.0) tmp38 = triton_helpers.minimum(tmp36, tmp37) tmp39 = tl.load(in_ptr0 + (48 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 & xmask, other=0.0) tmp40 = triton_helpers.minimum(tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp34, tmp35) tmp42 = triton_helpers.maximum(tmp41, tmp37) tmp43 = triton_helpers.maximum(tmp42, tmp39) tmp44 = 1e-08 tmp45 = tmp43 + tmp44 tmp46 = tmp40 / tmp45 tmp47 = 1.0 tmp48 = tmp47 - tmp46 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp33, tmp48, tmp49) tmp51 = tmp0 >= tmp31 tl.full([1], 12, tl.int64) tmp54 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 64 * x2), tmp51 & xmask, other=0.0) tmp55 = tl.load(in_ptr0 + (16 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 & xmask, other=0.0) tmp56 = triton_helpers.maximum(tmp54, tmp55) tmp57 = tl.load(in_ptr0 + (32 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 & xmask, other=0.0) tmp58 = triton_helpers.maximum(tmp56, tmp57) tmp59 = tl.load(in_ptr0 + (48 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 & xmask, other=0.0) tmp60 = triton_helpers.maximum(tmp58, tmp59) tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype) tmp62 = tl.where(tmp51, tmp60, tmp61) tmp63 = tl.where(tmp33, tmp50, tmp62) tmp64 = tl.where(tmp4, tmp29, tmp63) tl.store(out_ptr0 + x3, tmp64, xmask) 
@triton.jit def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 == tmp0 tmp2 = tl_math.abs(tmp0) tmp3 = float('inf') tmp4 = tmp2 != tmp3 tmp5 = tmp1 & tmp4 tmp6 = tmp5 == 0 tmp7 = 0.0 tmp8 = tl.where(tmp6, tmp7, tmp0) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_new_zeros_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_new_ones_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 1.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_copy_div_mul_remainder_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 3 x0 = xindex % 16 x2 = xindex // 48 x3 = xindex tmp6 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp1 == tmp3 tmp5 = tmp3 == tmp3 tmp8 = 255.0 tmp9 = tmp7 * tmp8 tmp10 = 0.002777777777777778 tmp11 = tmp9 * tmp10 tmp12 = tmp6 + tmp11 tmp13 = tl.where(tmp5, tmp12, tmp6) tmp14 = 1.0 tmp15 = tmp13 % tmp14 tmp16 = tmp15 != tmp3 tmp17 = libdevice.signbit(tmp15 ) if tmp15.dtype is tl.float32 else tmp15 < 0 tmp18 = libdevice.signbit(tmp14 ) if tmp14.dtype is tl.float32 else tmp14 < 0 tmp19 = tmp17 != tmp18 tmp20 = tmp16 & tmp19 tmp21 = tmp15 + tmp14 tmp22 = tl.where(tmp20, tmp21, tmp15) tmp24 = tl.where(tmp4, tmp12, tmp23) tmp25 = tl.where(tmp4, tmp22, tmp24) tmp27 = tmp25 * tmp26 tmp28 = tmp0 == tmp3 tmp30 = tl.where(tmp28, tmp12, tmp29) tmp31 = tl.where(tmp28, tmp22, tmp30) tmp32 = tl.where(tmp2, tmp27, tmp31) tl.store(out_ptr0 + x3, tmp32, xmask) @triton.jit def triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5( in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 12 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 3 r2 = rindex x1 = xindex // 3 x3 = xindex tmp13 = tl.load(in_ptr0 + (32 + r2 + 48 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (r2 + 48 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp42 = tl.load(in_ptr0 + (16 + r2 + 48 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp57 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1, 1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1, 1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 3.0 tmp6 = 1.0 tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = 
5.0 tmp9 = tl.where(tmp2, tmp8, tmp7) tmp10 = tl.full([1, 1], 0, tl.int32) tmp11 = tl.full([1, 1], 2, tl.int32) tmp12 = tmp10 == tmp11 tmp15 = tmp13 * tmp14 tmp17 = tl.where(tmp12, tmp15, tmp16) tmp18 = 0.0 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = triton_helpers.minimum(tmp19, tmp6) tmp21 = 6.0 tmp22 = tmp20 * tmp21 tmp23 = tmp9 + tmp22 tmp24 = tmp23 % tmp21 tmp25 = tmp24 != tmp10 tmp26 = libdevice.signbit(tmp24 ) if tmp24.dtype is tl.float32 else tmp24 < 0 tmp27 = libdevice.signbit(tmp21 ) if tmp21.dtype is tl.float32 else tmp21 < 0 tmp28 = tmp26 != tmp27 tmp29 = tmp25 & tmp28 tmp30 = tmp24 + tmp21 tmp31 = tl.where(tmp29, tmp30, tmp24) tmp32 = 4.0 tmp33 = tmp32 - tmp31 tmp34 = triton_helpers.minimum(tmp31, tmp33) tmp35 = triton_helpers.maximum(tmp34, tmp18) tmp36 = tmp11 == tmp11 tmp37 = tl.where(tmp36, tmp15, tmp13) tmp38 = triton_helpers.maximum(tmp37, tmp18) tmp39 = triton_helpers.minimum(tmp38, tmp6) tmp40 = tl.full([1, 1], 1, tl.int32) tmp41 = tmp40 == tmp11 tmp43 = tl.where(tmp41, tmp15, tmp42) tmp44 = triton_helpers.maximum(tmp43, tmp18) tmp45 = triton_helpers.minimum(tmp44, tmp6) tmp46 = tmp39 * tmp45 tmp47 = triton_helpers.minimum(tmp35, tmp6) tmp48 = tmp46 * tmp47 tmp49 = tmp39 - tmp48 tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp52 = tl.where(xmask, tmp50, 0) tmp53 = tl.sum(tmp52, 1)[:, None] tmp54 = 16.0 tmp55 = tmp53 / tmp54 tmp56 = tmp49 - tmp55 tmp58 = tmp56 * tmp57 tmp59 = tmp58 + tmp55 tmp60 = triton_helpers.maximum(tmp59, tmp18) tmp61 = triton_helpers.minimum(tmp60, tmp6) tl.store(out_ptr1 + (r2 + 16 * x3), tmp61, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12, 4), (48, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(192)](arg0_1, buf0, 192, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 triton_poi_fused_index_put_lift_fresh_1[grid(192)](buf0, buf0, 192, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) triton_poi_fused_new_zeros_2[grid(4)](buf7, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf8 = torch.ops.aten.uniform.default(buf7, -4.0, 4.0) buf9 = buf8 del buf8 buf10 = buf7 del buf7 triton_poi_fused_new_ones_3[grid(4)](buf10, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf11 = torch.ops.aten.uniform.default(buf10, 0.0, 5.0) del buf10 buf12 = buf11 del buf11 buf13 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) triton_poi_fused_add_copy_div_mul_remainder_4[grid(192)](buf0, buf9, buf12, buf13, 192, XBLOCK=256, num_warps=4, num_stages=1) del buf12 buf14 = buf9 del buf9 triton_poi_fused_new_ones_3[grid(4)](buf14, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf15 = torch.ops.aten.uniform.default(buf14, 0.0, 5.0) buf16 = buf15 del buf15 buf20 = reinterpret_tensor(buf14, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf14 buf21 = torch.ops.aten.uniform.default(buf20, 0.0, 5.0) del buf20 buf22 = buf21 del buf21 buf23 = reinterpret_tensor(buf0, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf0 triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5[ grid(12)](buf13, buf16, buf22, buf23, 12, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf13 del buf16 del buf22 return buf23, def hsv2rgb(hsv): """Convert a 4-d HSV tensor to the RGB counterpart. >>> %timeit hsv2rgb_lookup(hsv) 2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) >>> %timeit hsv2rgb(rgb) 298 µs ± 542 ns per loop (mean ± std. dev. 
of 7 runs, 1000 loops each) >>> torch.allclose(hsv2rgb(hsv), hsv2rgb_lookup(hsv), atol=1e-6) True References [1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative """ h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]] c = v * s n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1) k = (n + h * 6) % 6 t = torch.min(k, 4.0 - k) t = torch.clamp(t, 0, 1) return v - c * t def rgb2hsv(rgb): """Convert a 4-d RGB tensor to the HSV counterpart. Here, we compute hue using atan2() based on the definition in [1], instead of using the common lookup table approach as in [2, 3]. Those values agree when the angle is a multiple of 30°, otherwise they may differ at most ~1.2°. >>> %timeit rgb2hsv_lookup(rgb) 1.07 ms ± 2.96 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> %timeit rgb2hsv(rgb) 380 µs ± 555 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> (rgb2hsv_lookup(rgb) - rgb2hsv(rgb)).abs().max() tensor(0.0031, device='cuda:0') References [1] https://en.wikipedia.org/wiki/Hue [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html [3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212 """ r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :] Cmax = rgb.max(1)[0] Cmin = rgb.min(1)[0] hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b) hue = hue % (2 * math.pi) / (2 * math.pi) saturate = 1 - Cmin / (Cmax + 1e-08) value = Cmax hsv = torch.stack([hue, saturate, value], dim=1) hsv[~torch.isfinite(hsv)] = 0.0 return hsv class RandomHSVFunction(Function): @staticmethod def forward(ctx, x, f_h, f_s, f_v): x = rgb2hsv(x) h = x[:, 0, :, :] h += f_h * 255.0 / 360.0 h = h % 1 x[:, 0, :, :] = h x[:, 1, :, :] = x[:, 1, :, :] * f_s x[:, 2, :, :] = x[:, 2, :, :] * f_v x = torch.clamp(x, 0, 1) x = hsv2rgb(x) return x @staticmethod def backward(ctx, grad_output): grad_input = None if ctx.needs_input_grad[0]: grad_input = grad_output.clone() return grad_input, None, None, None class ColorJitterLayerNew(nn.Module): def __init__(self, brightness, contrast, saturation, hue): super(ColorJitterLayerNew, self).__init__() self.brightness = self._check_input(brightness, 'brightness') self.contrast = self._check_input(contrast, 'contrast') self.saturation = self._check_input(saturation, 'saturation') self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5 ), clip_first_on_zero=False) def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): if isinstance(value, numbers.Number): if value < 0: raise ValueError( 'If {} is a single number, it must be non-negative.'. format(name)) value = [center - value, center + value] if clip_first_on_zero: value[0] = max(value[0], 0) elif isinstance(value, (tuple, list)) and len(value) == 2: if not bound[0] <= value[0] <= value[1] <= bound[1]: raise ValueError('{} values should be between {}'.format( name, bound)) else: raise TypeError( '{} should be a single number or a list/tuple with length 2.'
.format(name)) if value[0] == value[1] == center: value = None return value def adjust_contrast(self, x): if self.contrast: factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast) means = torch.mean(x, dim=[2, 3], keepdim=True) x = (x - means) * factor + means return torch.clamp(x, 0, 1) def adjust_hsv(self, x): f_h = x.new_zeros(x.size(0), 1, 1) f_s = x.new_ones(x.size(0), 1, 1) f_v = x.new_ones(x.size(0), 1, 1) if self.hue: f_h.uniform_(*self.hue) if self.saturation: f_s = f_s.uniform_(*self.saturation) if self.brightness: f_v = f_v.uniform_(*self.brightness) return RandomHSVFunction.apply(x, f_h, f_s, f_v) def transform(self, inputs): if np.random.rand() > 0.5: transforms = [self.adjust_contrast, self.adjust_hsv] else: transforms = [self.adjust_hsv, self.adjust_contrast] for t in transforms: inputs = t(inputs) return inputs def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
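# A minimal sketch of driving the compiled wrapper above. ColorJitterLayerNew.forward
# feeds straight into the fused call(), which asserts a CUDA (4, 4, 4, 4) input.
# The np.random.rand() ordering branch of the eager transform() appears to have been
# specialized away at trace time, so this graph always applies the HSV jitter before
# the contrast adjustment rather than picking an order at random.
layer = ColorJitterLayerNew(brightness=4, contrast=4, saturation=4, hue=4)
x = torch.rand(4, 4, 4, 4, device='cuda')
out = layer(x)  # jitter factors are still re-sampled on every call
print(out.shape)  # torch.Size([4, 3, 4, 4])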
hugobloem/PyTorch-StudioGAN
ColorJitterLayer
false
12,654
[ "MIT" ]
0
3deab27c0774adba5a94c7f452d32d4cbc3b117c
https://github.com/hugobloem/PyTorch-StudioGAN/tree/3deab27c0774adba5a94c7f452d32d4cbc3b117c
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/33/c33u5ltxjw6rn4li2j53yyzvr5xmqgu5ezjzwlr4xkh266pnrokm.py # Topologically Sorted Source Nodes: [mul, sum_1, mul_1, add, sum_2, sum_3, add_1, add_2, add_3, truediv, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # mul => mul # mul_1 => mul_1 # sub => sub # sum_1 => sum_1 # sum_2 => sum_2 # sum_3 => sum_3 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 0), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg0_1,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg1_1,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 0), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-07), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_3), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.broadcast_to(tmp0, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp1, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 2.0 tmp13 = tmp5 * tmp12 tmp14 = 0.0 tmp15 = tmp13 + tmp14 tmp16 = tmp8 + tmp11 tmp17 = tmp16 + tmp14 tmp18 = 1e-07 tmp19 = tmp17 + tmp18 tmp20 = tmp15 / tmp19 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp22, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, sum_1, mul_1, add, sum_2, sum_3, add_1, add_2, add_3, truediv, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class DiceLoss(nn.Module): def __init__(self, smooth=0, eps=1e-07): super(DiceLoss, self).__init__() self.smooth = smooth self.eps = eps def forward(self, output, target): return 1 - (2 * torch.sum(output * target) + self.smooth) / (torch. sum(output) + torch.sum(target) + self.smooth + self.eps) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
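A quick numeric check of the formula (my sketch; the tensor shapes match get_inputs above): with perfect overlap the ratio 2*sum(o*t) / (sum(o) + sum(t)) equals 1 up to eps, so the loss is ~0, while zero overlap makes the numerator vanish and the loss reach its maximum of 1.

loss_fn = DiceLoss()
ones = torch.ones(4, 4, 4, 4)
zeros = torch.zeros(4, 4, 4, 4)
print(loss_fn(ones, ones))   # ~0: 1 - 2*256 / (256 + 256 + 1e-07)
print(loss_fn(ones, zeros))  # 1.0: zero intersection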
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.broadcast_to(tmp0, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp1, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 2.0 tmp13 = tmp5 * tmp12 tmp14 = 0.0 tmp15 = tmp13 + tmp14 tmp16 = tmp8 + tmp11 tmp17 = tmp16 + tmp14 tmp18 = 1e-07 tmp19 = tmp17 + tmp18 tmp20 = tmp15 / tmp19 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class DiceLossNew(nn.Module): def __init__(self, smooth=0, eps=1e-07): super(DiceLossNew, self).__init__() self.smooth = smooth self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
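The fused kernel performs the same three reductions (sum(o*t), sum(o), sum(t)) in a single pass, so the compiled module should agree with the eager one up to reduction-order rounding. A hypothetical parity check (assumes a CUDA device and that the eager DiceLoss above is importable alongside DiceLossNew; call() asserts the exact contiguous (4, 4, 4, 4) layout):

out = torch.rand(4, 4, 4, 4, device='cuda')
tgt = torch.rand(4, 4, 4, 4, device='cuda')
# Both paths reduce 256 elements, so any float32 discrepancy is tiny.
assert torch.allclose(DiceLoss()(out, tgt), DiceLossNew()(out, tgt), atol=1e-6)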
kant/open-solution-ship-detection
DiceLoss
false
12,655
[ "MIT" ]
0
94fa14fc461d6088d884930cbd8e2a2b99a338b5
https://github.com/kant/open-solution-ship-detection/tree/94fa14fc461d6088d884930cbd8e2a2b99a338b5
NetVLAD
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/bf/cbfrkgswbn2pkwqjbw4i7f66ydt4nol3efiwmcxa2rjdv6dwkqje.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # x => pow_1, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) triton_red_fused_linalg_vector_norm_0 = async_compile.triton('triton_red_fused_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[16384, 2048], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 16384 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, 
None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = (xindex // 4096) _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (8388608*x1)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tl.store(out_ptr0 + (x3), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/qa/cqaszodpzbcscr27ul4a4jibczspt3c377awsaeus56ighyyoksv.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.div] # Source node to ATen node mapping: # x => div # Graph fragment: # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y1 = (yindex // 2048) y0 = yindex % 2048 tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + (4096*y1)), None, eviction_policy='evict_last') tmp2 = libdevice.sqrt(tmp1) tmp3 = 1e-12 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 / tmp4 tl.store(out_ptr0 + (y0 + (2048*x2) + (8388608*y1)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7w/c7wj3o4hkz7b7hkmngp4dqyks75cnwqwmxqhjj2wgpdb3zojbcgt.py # Topologically Sorted Source Nodes: [conv2d, soft_assign_1], Original ATen: [aten.convolution, aten._softmax] # 
Source node to ATen node mapping: # conv2d => convolution # soft_assign_1 => amax, exp, sub, sum_2 # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%div, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) triton_per_fused__softmax_convolution_2 = async_compile.triton('triton_per_fused__softmax_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16384, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16384 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + (16*x0)), None) tmp1 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = triton_helpers.max2(tmp3, 1)[:, None] tmp6 = tmp2 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.sum(tmp8, 1)[:, None] tl.store(in_out_ptr0 + (r1 + (16*x0)), tmp2, None) tl.store(out_ptr0 + (x0), tmp5, None) tl.store(out_ptr1 + (x0), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/p6/cp6mf4zvc62jxklsm2dgzmyaobltt6bodnipovjl2f4j4b7wn2fm.py # Topologically Sorted Source Nodes: [residual, residual_1, vlad], Original ATen: [aten.sub, aten.mul, aten.sum] # Source node to ATen node mapping: # 
residual => sub_1 # residual_1 => mul # vlad => sum_3 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %unsqueeze_1), kwargs = {}) # %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) triton_red_fused_mul_sub_sum_3 = async_compile.triton('triton_red_fused_mul_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[131072, 4096], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 131072 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 2048 x2 = (xindex // 32768) x4 = xindex % 32768 tmp1 = tl.load(in_ptr1 + (x4), None, eviction_policy='evict_last') x1 = (xindex // 2048) % 16 _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x5 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (2048*r3) + (8388608*x2)), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr2 + (x1 + (16*r3) + (65536*x2)), rmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr3 + (r3 + (4096*x2)), rmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr4 + (r3 + (4096*x2)), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp2 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = _tmp11 + tmp10 _tmp11 = tl.where(rmask, tmp12, _tmp11) tmp11 = tl.sum(_tmp11, 1)[:, None] tl.store(out_ptr0 + (x5), tmp11, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/k6/ck64gjoptjweacs5vakn6svbb5dmx7n72ar2va5c262w5dbjui5l.py # Topologically Sorted Source Nodes: [vlad_1], Original ATen: 
[aten.linalg_vector_norm] # Source node to ATen node mapping: # vlad_1 => pow_3, pow_4, sum_4 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2], True), kwargs = {}) # %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) triton_red_fused_linalg_vector_norm_4 = async_compile.triton('triton_red_fused_linalg_vector_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[64, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_linalg_vector_norm_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_linalg_vector_norm_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 64 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask & xmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tmp5 = libdevice.sqrt(tmp3) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/4b/c4by4mz5l3624eqee5nubw66nu7yxydwqpbcoikhk4b6ko4tnfrs.py # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # vlad_3 => pow_5, sum_5 # Graph fragment: # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {}) triton_red_fused_linalg_vector_norm_5 = async_compile.triton('triton_red_fused_linalg_vector_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[16, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_linalg_vector_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_linalg_vector_norm_5(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 16 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + ((4*x0) + (r1 // 2048)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = 1e-12 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = _tmp7 + tmp6 _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7) tmp7 = tl.sum(_tmp7, 1)[:, None] tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/w4/cw4sjn5i7macqqawovqsorg5splabaed43d6ca36ca2jv2mgsgi6.py # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # vlad_3 => pow_5, pow_6, sum_5 # Graph fragment: # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {}) # %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 0.5), kwargs = {}) triton_per_fused_linalg_vector_norm_6 = async_compile.triton('triton_per_fused_linalg_vector_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 4], reduction_hint=ReductionHint.INNER, 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_linalg_vector_norm_6(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/j7/cj72ul6x5mnmg2inrn2wq62lxsfe7nyl4s22mbdkfeugyqa453ed.py # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.div] # Source node to ATen node mapping: # vlad_3 => div_3 # Graph fragment: # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %expand_4), kwargs = {}) triton_poi_fused_div_7 = async_compile.triton('triton_poi_fused_div_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_7(in_ptr0, 
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = (xindex // 32768) tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + ((x2 // 2048)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last') tmp2 = 1e-12 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp6 = triton_helpers.maximum(tmp5, tmp2) tmp7 = tmp4 / tmp6 tl.store(out_ptr0 + (x2), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 2048, 64, 64), (8388608, 4096, 64, 1)) assert_size_stride(primals_2, (16, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (16, 2048), (2048, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm] stream0 = get_raw_stream(0) triton_red_fused_linalg_vector_norm_0.run(primals_1, buf0, 16384, 2048, grid=grid(16384), stream=stream0) buf1 = empty_strided_cuda((4, 2048, 64, 64), (8388608, 1, 131072, 2048), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_1, buf0, buf1, 8192, 4096, grid=grid(8192, 4096), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 64, 64), (65536, 1, 1024, 16)) buf3 = buf2; del buf2 # reuse buf4 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, soft_assign_1], Original ATen: [aten.convolution, aten._softmax] triton_per_fused__softmax_convolution_2.run(buf3, primals_3, buf4, buf5, 16384, 16, grid=grid(16384), stream=stream0) del primals_3 buf6 = empty_strided_cuda((4, 16, 2048), (32768, 2048, 1), torch.float32) # Topologically Sorted Source Nodes: [residual, residual_1, vlad], Original ATen: [aten.sub, aten.mul, aten.sum] triton_red_fused_mul_sub_sum_3.run(buf1, primals_4, buf3, buf4, buf5, buf6, 131072, 4096, grid=grid(131072), stream=stream0) buf7 = empty_strided_cuda((4, 16, 1), (16, 1, 64), torch.float32) buf8 = reinterpret_tensor(buf7, (4, 16, 1), (16, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [vlad_1], Original ATen: [aten.linalg_vector_norm] triton_red_fused_linalg_vector_norm_4.run(buf8, buf6, 64, 2048, grid=grid(64), stream=stream0) buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm] triton_red_fused_linalg_vector_norm_5.run(buf6, buf8, buf9, 16, 8192, grid=grid(16), stream=stream0) buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf11 = reinterpret_tensor(buf10, (4, 1), (1, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm] triton_per_fused_linalg_vector_norm_6.run(buf11, buf9, 4, 4, grid=grid(4), stream=stream0) del buf9 buf12 = 
empty_strided_cuda((4, 32768), (32768, 1), torch.float32) # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.div] triton_poi_fused_div_7.run(buf6, buf8, buf11, buf12, 131072, grid=grid(131072), stream=stream0) return (buf12, primals_2, primals_4, buf1, buf3, buf4, buf5, buf6, buf8, buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 2048, 64, 64), (8388608, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.optim import torch.utils.data import torch.nn.functional as F class NetVLAD(nn.Module): """NetVLAD layer implementation""" def __init__(self, num_clusters=16, dim=2048, alpha=30.0, normalize_input=True): """ Args: num_clusters : int The number of clusters dim : int Dimension of descriptors alpha : float Parameter of initialization. Larger value is harder assignment. normalize_input : bool If true, descriptor-wise L2 normalization is applied to input. """ super(NetVLAD, self).__init__() self.num_clusters = num_clusters self.dim = dim self.alpha = alpha self.normalize_input = normalize_input self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=True) self.centroids = nn.Parameter(torch.rand(num_clusters, dim)) self._init_params() def _init_params(self): self.conv.weight = nn.Parameter((2.0 * self.alpha * self.centroids) .unsqueeze(-1).unsqueeze(-1)) self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1)) def forward(self, x): N, C = x.shape[:2] if self.normalize_input: x = F.normalize(x, p=2, dim=1) soft_assign = self.conv(x).view(N, self.num_clusters, -1) soft_assign = F.softmax(soft_assign, dim=1) x_flatten = x.view(N, C, -1) residual = x_flatten.expand(self.num_clusters, -1, -1, -1).permute( 1, 0, 2, 3) - self.centroids.expand(x_flatten.size(-1), -1, -1 ).permute(1, 2, 0).unsqueeze(0) residual *= soft_assign.unsqueeze(2) vlad = residual.sum(dim=-1) vlad = F.normalize(vlad, p=2, dim=2) vlad = vlad.view(x.size(0), -1) vlad = F.normalize(vlad, p=2, dim=1) return vlad def get_inputs(): return [torch.rand([4, 2048, 64, 64])] def get_init_inputs(): return [[], {}]
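For orientation, a small usage sketch (shapes are mine; the eager module accepts any H and W, and the output is the flattened, twice-normalized descriptor of size num_clusters * dim per sample):

layer = NetVLAD(num_clusters=16, dim=2048)
feats = torch.rand(2, 2048, 8, 8)   # (N, C, H, W) local descriptors
desc = layer(feats)
print(desc.shape)        # torch.Size([2, 32768])
print(desc.norm(dim=1))  # ~1.0 per row, from the final L2 normalization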
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = xindex // 4096 _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 8388608 * x1), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tl.store(out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y1 = yindex // 2048 y0 = yindex % 2048 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4096 * y1), None, eviction_policy= 'evict_last') tmp2 = libdevice.sqrt(tmp1) tmp3 = 1e-12 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 / tmp4 tl.store(out_ptr0 + (y0 + 2048 * x2 + 8388608 * y1), tmp5, None) @triton.jit def triton_per_fused__softmax_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + 16 * x0), None) tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = triton_helpers.max2(tmp3, 1)[:, None] tmp6 = tmp2 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.sum(tmp8, 1)[:, None] tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp2, None) tl.store(out_ptr0 + x0, tmp5, None) tl.store(out_ptr1 + x0, tmp10, None) @triton.jit def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl. 
constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 2048 x2 = xindex // 32768 x4 = xindex % 32768 tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last') x1 = xindex // 2048 % 16 _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x5 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 2048 * r3 + 8388608 * x2), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr2 + (x1 + 16 * r3 + 65536 * x2), rmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr3 + (r3 + 4096 * x2), rmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tl.load(in_ptr4 + (r3 + 4096 * x2), rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp2 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = _tmp11 + tmp10 _tmp11 = tl.where(rmask, tmp12, _tmp11) tmp11 = tl.sum(_tmp11, 1)[:, None] tl.store(out_ptr0 + x5, tmp11, None) @triton.jit def triton_red_fused_linalg_vector_norm_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 64 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask & xmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tmp5 = libdevice.sqrt(tmp3) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_red_fused_linalg_vector_norm_5(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 16 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (4 * x0 + r1 // 2048), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = 1e-12 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = _tmp7 + tmp6 _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7) tmp7 = tl.sum(_tmp7, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_per_fused_linalg_vector_norm_6(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tl.debug_barrier() 
tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_div_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 32768 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x2 // 2048, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp2 = 1e-12 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp6 = triton_helpers.maximum(tmp5, tmp2) tmp7 = tmp4 / tmp6 tl.store(out_ptr0 + x2, tmp7, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 2048, 64, 64), (8388608, 4096, 64, 1)) assert_size_stride(primals_2, (16, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (16, 2048), (2048, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) get_raw_stream(0) triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1, buf0, 16384, 2048, XBLOCK=64, RBLOCK=64, num_warps=16, num_stages=1) buf1 = empty_strided_cuda((4, 2048, 64, 64), (8388608, 1, 131072, 2048), torch.float32) triton_poi_fused_div_1[grid(8192, 4096)](primals_1, buf0, buf1, 8192, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 64, 64), (65536, 1, 1024, 16)) buf3 = buf2 del buf2 buf4 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32) triton_per_fused__softmax_convolution_2[grid(16384)](buf3, primals_3, buf4, buf5, 16384, 16, XBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf6 = empty_strided_cuda((4, 16, 2048), (32768, 2048, 1), torch. float32) triton_red_fused_mul_sub_sum_3[grid(131072)](buf1, primals_4, buf3, buf4, buf5, buf6, 131072, 4096, XBLOCK=64, RBLOCK=4, num_warps= 8, num_stages=1) buf7 = empty_strided_cuda((4, 16, 1), (16, 1, 64), torch.float32) buf8 = reinterpret_tensor(buf7, (4, 16, 1), (16, 1, 1), 0) del buf7 triton_red_fused_linalg_vector_norm_4[grid(64)](buf8, buf6, 64, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) triton_red_fused_linalg_vector_norm_5[grid(16)](buf6, buf8, buf9, 16, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf11 = reinterpret_tensor(buf10, (4, 1), (1, 1), 0) del buf10 triton_per_fused_linalg_vector_norm_6[grid(4)](buf11, buf9, 4, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf9 buf12 = empty_strided_cuda((4, 32768), (32768, 1), torch.float32) triton_poi_fused_div_7[grid(131072)](buf6, buf8, buf11, buf12, 131072, XBLOCK=512, num_warps=8, num_stages=1) return (buf12, primals_2, primals_4, buf1, buf3, buf4, buf5, buf6, buf8, buf11) class NetVLADNew(nn.Module): """NetVLAD layer implementation""" def __init__(self, num_clusters=16, dim=2048, alpha=30.0, normalize_input=True): """ Args: num_clusters : int The number of clusters dim : int Dimension of descriptors alpha : float Parameter of initialization. Larger value is harder assignment. normalize_input : bool If true, descriptor-wise L2 normalization is applied to input. 
""" super(NetVLADNew, self).__init__() self.num_clusters = num_clusters self.dim = dim self.alpha = alpha self.normalize_input = normalize_input self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=True) self.centroids = nn.Parameter(torch.rand(num_clusters, dim)) self._init_params() def _init_params(self): self.conv.weight = nn.Parameter((2.0 * self.alpha * self.centroids) .unsqueeze(-1).unsqueeze(-1)) self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1)) def forward(self, input_0): primals_4 = self.centroids primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
fede-vaccaro/cnnimageretrieval-pytorch
NetVLAD
false
12,656
[ "MIT" ]
0
56bf4ee865e9769801819943f75fff207f0c2f00
https://github.com/fede-vaccaro/cnnimageretrieval-pytorch/tree/56bf4ee865e9769801819943f75fff207f0c2f00
Conv1dWeightNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/vw/cvwzmrf7u7j4rr5x5bjsxorzvl7rkn2io72j5uk3mjnapf4ukldf.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) triton_per_fused__weight_norm_interface_0 = async_compile.triton('triton_per_fused__weight_norm_interface_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr0 + (r1 + (16*x0)), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/sr/csrhsrdic5iyy4yxrz5ceuduwhllrnbrs3ear4tfocob4nmhl4ke.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %mul, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 
1), (1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] stream0 = get_raw_stream(0) triton_per_fused__weight_norm_interface_0.run(buf1, primals_2, primals_1, buf2, 4, 16, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0), buf2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 1), (4, 1, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf4, primals_3, 4, grid=grid(4), stream=stream0) del primals_3 return (reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf2, primals_1, primals_2, buf1, buf2, reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Conv1dWeightNorm(nn.Module):
    """
    Conv1d with weight normalization
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        super(Conv1dWeightNorm, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
                              stride=stride, padding=padding,
                              dilation=dilation, groups=groups, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.conv.weight, mean=0.0, std=0.05)
        if self.conv.bias is not None:
            nn.init.constant_(self.conv.bias, 0)
        self.conv = nn.utils.weight_norm(self.conv)

    def init(self, x, init_scale=1.0):
        with torch.no_grad():
            out = self(x)
            n_channels = out.size(1)
            out = out.transpose(0, 1).contiguous().view(n_channels, -1)
            mean = out.mean(dim=1)
            std = out.std(dim=1)
            inv_stdv = init_scale / (std + 1e-06)
            self.conv.weight_g.mul_(inv_stdv.view(n_channels, 1, 1))
            if self.conv.bias is not None:
                self.conv.bias.add_(-mean).mul_(inv_stdv)
            return self(x)

    def forward(self, input):
        return self.conv(input)

    def extra_repr(self):
        return self.conv.extra_repr()


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
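# A minimal sketch, assuming eager PyTorch: the fused
# triton_per_fused__weight_norm_interface_0 kernel computes, per output
# channel, w = g * v / ||v||_2 with the L2 norm reduced over each v[c] slice.
# The variable names below are illustrative, not taken from the row above.
import torch
import torch.nn as nn

conv = nn.utils.weight_norm(nn.Conv1d(4, 4, 4))
v, g = conv.weight_v, conv.weight_g  # shapes (4, 4, 4) and (4, 1, 1), as guarded in call()
norm = v.pow(2).sum(dim=(1, 2), keepdim=True).sqrt()  # sqrt(sum(v*v)) -> buf1
manual = v * (g / norm)                               # v * (g / norm)  -> buf2
assert torch.allclose(conv.weight, manual, atol=1e-06)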
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1,
        out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tmp6 = libdevice.sqrt(tmp5)
    tmp8 = tmp7 / tmp6
    tmp9 = tmp0 * tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2,
            primals_1, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
        buf3 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1,
            4, 4), (16, 4, 1), 0), buf2, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf3, (1, 4, 1), (4, 1, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_convolution_1[grid(4)](buf4, primals_3, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        del primals_3
    return (reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf2, primals_1,
        primals_2, buf1, buf2, reinterpret_tensor(primals_4, (1, 4, 4),
        (16, 4, 1), 0))


class Conv1dWeightNormNew(nn.Module):
    """
    Conv1d with weight normalization
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        super(Conv1dWeightNormNew, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
                              stride=stride, padding=padding,
                              dilation=dilation, groups=groups, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.conv.weight, mean=0.0, std=0.05)
        if self.conv.bias is not None:
            nn.init.constant_(self.conv.bias, 0)
        self.conv = nn.utils.weight_norm(self.conv)

    def init(self, x, init_scale=1.0):
        with torch.no_grad():
            out = self(x)
            n_channels = out.size(1)
            out = out.transpose(0, 1).contiguous().view(n_channels, -1)
            mean = out.mean(dim=1)
            std = out.std(dim=1)
            inv_stdv = init_scale / (std + 1e-06)
            self.conv.weight_g.mul_(inv_stdv.view(n_channels, 1, 1))
            if self.conv.bias is not None:
                self.conv.bias.add_(-mean).mul_(inv_stdv)
            return self(x)

    def extra_repr(self):
        return self.conv.extra_repr()

    def forward(self, input_0):
        primals_3 = self.conv.bias
        primals_1 = self.conv.weight_g
        primals_2 = self.conv.weight_v
        primals_4 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
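# Usage sketch, assuming a CUDA device is available (call() pins device 0 and
# guards the input to a float32 (4, 4) tensor): the New wrapper is a drop-in
# replacement for the eager module on exactly these shapes.
import torch

m = Conv1dWeightNormNew(4, 4, 4).cuda()
x = torch.rand(4, 4, device='cuda')
y = m(x)          # Triton weight-norm kernel, extern conv1d, then the bias-add kernel
print(y.shape)    # torch.Size([4, 1])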
juheeuu/flowseq
Conv1dWeightNorm
false
12,657
[ "Apache-2.0" ]
0
e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
https://github.com/juheeuu/flowseq/tree/e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
Scaler
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/zb/czbpxgc7coll2fuutovx35jzbugs5g2f5fy4pos333z2gsojlifu.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.sub, aten.mul] # Source node to ATen node mapping: # x => sub # x_1 => mul # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg2_1), kwargs = {}) triton_poi_fused_mul_sub_0 = async_compile.triton('triton_poi_fused_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 
tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp6 = tmp3 * tmp5 tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (1, ), (1, )) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.sub, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sub_0.run(arg1_1, arg0_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from abc import ABC


class BaseOperator(ABC):
    """
    Abstract class defining the basic structure for operator implementations in Hummingbird.
    """

    def __init__(self, regression=False, classification=False,
                 transformer=False, anomaly_detection=False, **kwargs):
        super().__init__()
        self.regression = regression
        self.classification = classification
        self.transformer = transformer
        self.anomaly_detection = anomaly_detection


class Scaler(BaseOperator, torch.nn.Module):
    """
    Class implementing Scaler operators in PyTorch. Supported normalizers are L1, L2 and Max.
    """

    def __init__(self, offset, scale, device):
        super(Scaler, self).__init__(transformer=True)
        self.offset = offset
        self.scale = scale
        if offset is not None:
            self.offset = torch.nn.Parameter(torch.FloatTensor([offset]),
                                             requires_grad=False)
        if scale is not None:
            self.scale = torch.nn.Parameter(torch.FloatTensor([scale]),
                                            requires_grad=False)

    def forward(self, x):
        if self.offset is not None:
            x = x - self.offset
        if self.scale is not None:
            x = x * self.scale
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'offset': 4, 'scale': 1.0, 'device': 0}]
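# The whole Scaler forward lowers to the single fused sub+mul kernel, i.e.
# elementwise (x - offset) * scale. A minimal eager sketch using the values
# from get_init_inputs() (offset=4, scale=1.0):
import torch

x = torch.rand(4, 4, 4, 4)
offset = torch.tensor([4.0])
scale = torch.tensor([1.0])
y = (x - offset) * scale           # matches tmp6 = (tmp0 - tmp2) * tmp5 above
assert torch.allclose(y, x - 4.0)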
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from abc import ABC
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp3 = tmp0 - tmp2
    tmp6 = tmp3 * tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (1,), (1,))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sub_0[grid(256)](arg1_1, arg0_1, arg2_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return (buf0,)


class BaseOperator(ABC):
    """
    Abstract class defining the basic structure for operator implementations in Hummingbird.
    """

    def __init__(self, regression=False, classification=False,
                 transformer=False, anomaly_detection=False, **kwargs):
        super().__init__()
        self.regression = regression
        self.classification = classification
        self.transformer = transformer
        self.anomaly_detection = anomaly_detection


class ScalerNew(BaseOperator, torch.nn.Module):
    """
    Class implementing Scaler operators in PyTorch. Supported normalizers are L1, L2 and Max.
    """

    def __init__(self, offset, scale, device):
        super(ScalerNew, self).__init__(transformer=True)
        self.offset = offset
        self.scale = scale
        if offset is not None:
            self.offset = torch.nn.Parameter(torch.FloatTensor([offset]),
                                             requires_grad=False)
        if scale is not None:
            self.scale = torch.nn.Parameter(torch.FloatTensor([scale]),
                                            requires_grad=False)

    def forward(self, input_0):
        arg0_1 = self.offset
        arg2_1 = self.scale
        arg1_1 = input_0
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
kernc/hummingbird
Scaler
false
12,658
[ "MIT" ]
0
8c9d5b1f19054d521b22ad7fcffa8ee10e405ac3
https://github.com/kernc/hummingbird/tree/8c9d5b1f19054d521b22ad7fcffa8ee10e405ac3
ConConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.cat] # Source node to ATen node mapping: # x1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + 
(64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x1_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 return (buf2, primals_3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class ConConv(nn.Module):

    def __init__(self, inplanes_x1, inplanes_x2, planes):
        super(ConConv, self).__init__()
        self.conv = nn.Conv2d(inplanes_x1 + inplanes_x2, planes,
                              kernel_size=1, bias=True)

    def forward(self, x1, x2):
        x1 = torch.cat([x2, x1], dim=1)
        x1 = self.conv(x1)
        return x1


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inplanes_x1': 4, 'inplanes_x2': 4, 'planes': 4}]
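# Because the conv is 1x1 over a channel concatenation, cat([x2, x1]) followed
# by self.conv equals two convolutions on the weight halves plus the bias.
# A hedged eager sketch (tensor names are illustrative):
import torch
import torch.nn.functional as F

m = ConConv(4, 4, 4)
x1, x2 = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
w, b = m.conv.weight, m.conv.bias                 # w: (4, 8, 1, 1)
split = (F.conv2d(x2, w[:, :4]) + F.conv2d(x1, w[:, 4:]) +
         b.view(1, -1, 1, 1))                     # channels 0-3 come from x2
assert torch.allclose(m(x1, x2), split, atol=1e-05)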
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
        other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 8, 1, 1), (8, 1, 1, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(256)](buf2, primals_4, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_4
    return buf2, primals_3, buf0


class ConConvNew(nn.Module):

    def __init__(self, inplanes_x1, inplanes_x2, planes):
        super(ConConvNew, self).__init__()
        self.conv = nn.Conv2d(inplanes_x1 + inplanes_x2, planes,
                              kernel_size=1, bias=True)

    def forward(self, input_0, input_1):
        primals_3 = self.conv.weight
        primals_4 = self.conv.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
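# The cat kernel walks the NCHW (4, 8, 4, 4) output through a flat index; this
# plain-Python sketch mirrors its x0/x1/x2 decomposition (the sample index is
# chosen arbitrarily):
idx = 437                  # any flat index < xnumel = 512
x0 = idx % 16              # offset inside one 4x4 spatial plane
x1 = idx // 16 % 8         # output channel: < 4 reads in_ptr0, else in_ptr1 at x1 - 4
x2 = idx // 128            # batch index
assert idx == 128 * x2 + 16 * x1 + x0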
karoly-hars/DE_hybrid_CNN
ConConv
false
12,659
[ "BSD-3-Clause" ]
0
d74ba4291d6db335151d5262ab96e8e3806a7587
https://github.com/karoly-hars/DE_hybrid_CNN/tree/d74ba4291d6db335151d5262ab96e8e3806a7587
Net_mish_ranger
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/nt/cnt242zndfm3n4oylofk2hfby6qwsvn3dgogtgqskjur3zjntaph.py # Topologically Sorted Source Nodes: [softplus, tanh, x], Original ATen: [aten.softplus, aten.tanh, aten.mul] # Source node to ATen node mapping: # softplus => exp, gt, log1p, where # tanh => tanh # x => mul # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%view_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 20), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %log1p), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%where,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %tanh), kwargs = {}) triton_poi_fused_mul_softplus_tanh_0 = async_compile.triton('triton_poi_fused_mul_softplus_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_softplus_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus, tanh, x], Original ATen: [aten.softplus, aten.tanh, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_softplus_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus_1, tanh_1, x_1], Original ATen: [aten.softplus, aten.tanh, aten.mul] triton_poi_fused_mul_softplus_tanh_0.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus_2, tanh_2, x_2], Original ATen: [aten.softplus, aten.tanh, aten.mul] triton_poi_fused_mul_softplus_tanh_0.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, 
(64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F


def mish(x):
    return x * torch.tanh(F.softplus(x))


class Net_mish_ranger(torch.nn.Module):

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net_mish_ranger, self).__init__()
        self.hidden1 = torch.nn.Linear(n_feature, n_hidden)
        self.hidden2 = torch.nn.Linear(n_hidden, n_hidden)
        self.hidden3 = torch.nn.Linear(n_hidden, n_output)
        self.predict = torch.nn.Linear(n_output, n_output)

    def forward(self, x):
        x = mish(self.hidden1(x))
        x = mish(self.hidden2(x))
        x = mish(self.hidden3(x))
        x = self.predict(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_feature': 4, 'n_hidden': 4, 'n_output': 4}]
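# The fused kernel realizes softplus with the same threshold-20 branch that
# F.softplus uses (x > 20 passes through; otherwise log1p(exp(x))), then tanh
# and the final multiply. A minimal equivalence sketch:
import torch
import torch.nn.functional as F

x = torch.randn(256) * 15                       # exercise both branch sides
sp = torch.where(x > 20.0, x, torch.log1p(torch.exp(x)))
assert torch.allclose(sp, F.softplus(x), atol=1e-06)
assert torch.allclose(x * torch.tanh(sp), mish(x), atol=1e-06)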
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_softplus_tanh_0[grid(256)](buf0, buf1, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_softplus_tanh_0[grid(256)](buf2, buf3, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_softplus_tanh_0[grid(256)](buf4, buf5, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf6)
        del primals_9
    return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0,
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2,
        reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4,
        reinterpret_tensor(buf5, (64, 4), (4, 1), 0), primals_8, primals_6,
        primals_4)


def mish(x):
    return x * torch.tanh(F.softplus(x))


class Net_mish_rangerNew(torch.nn.Module):

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net_mish_rangerNew, self).__init__()
        self.hidden1 = torch.nn.Linear(n_feature, n_hidden)
        self.hidden2 = torch.nn.Linear(n_hidden, n_hidden)
        self.hidden3 = torch.nn.Linear(n_hidden, n_output)
        self.predict = torch.nn.Linear(n_output, n_output)

    def forward(self, input_0):
        primals_1 = self.hidden1.weight
        primals_2 = self.hidden1.bias
        primals_4 = self.hidden2.weight
        primals_5 = self.hidden2.bias
        primals_6 = self.hidden3.weight
        primals_7 = self.hidden3.bias
        primals_8 = self.predict.weight
        primals_9 = self.predict.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
kartheikiyer/dense_basis_toolbelt
Net_mish_ranger
false
12,660
[ "MIT" ]
0
5cae6e8f4ea6983fba3625f47413d40d6b3bc6e4
https://github.com/kartheikiyer/dense_basis_toolbelt/tree/5cae6e8f4ea6983fba3625f47413d40d6b3bc6e4
SELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ik/cikb25u5x5667mb6rnbleob7teijkbt54zawgiyjor4rhenfpsjm.py # Topologically Sorted Source Nodes: [mse_loss, sum_1], Original ATen: [aten.mse_loss, aten.sum] # Source node to ATen node mapping: # mse_loss => pow_1, sub # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) triton_poi_fused_mse_loss_sum_0 = async_compile.triton('triton_poi_fused_mse_loss_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mse_loss_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mse_loss_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) 
* XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp10 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mse_loss, sum_1], Original ATen: [aten.mse_loss, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_mse_loss_sum_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import Tensor
from torch import nn


class SELoss(nn.MSELoss):

    def __init__(self):
        super().__init__(reduction='none')

    def forward(self, inputs: Tensor, target: Tensor) -> Tensor:
        return super().forward(inputs, target).sum(1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
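# reduction='none' keeps the per-element squared error and .sum(1) collapses
# only dim 1, which is why the compiled graph returns a (4, 4, 4) tensor
# rather than a scalar. A minimal eager sketch:
import torch

inputs, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = SELoss()(inputs, target)
assert loss.shape == (4, 4, 4)
assert torch.allclose(loss, (inputs - target).pow(2).sum(dim=1))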
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mse_loss_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
    tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
    tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tl.store(out_ptr0 + x2, tmp18, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mse_loss_sum_0[grid(64)](arg1_1, arg0_1, buf0, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
    return (buf0,)


class SELossNew(nn.MSELoss):

    def __init__(self):
        super().__init__(reduction='none')

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
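# Since dim 1 has a fixed size of 4, the kernel unrolls the reduction: each of
# the 64 outputs gathers four input pairs spaced 16 floats apart. This offset
# arithmetic mirrors the kernel's loads for one (x0, x1) pair:
x0, x1 = 5, 2                                    # spatial offset, batch index
loads = [16 * k + x0 + 64 * x1 for k in range(4)]
print(loads)                                     # [133, 149, 165, 181]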
kfirgedal/lightning-bolts
SELoss
false
12,661
[ "Apache-2.0" ]
0
cbb8b6c21ca1de757d0f289fb840d59a3b6a10f5
https://github.com/kfirgedal/lightning-bolts/tree/cbb8b6c21ca1de757d0f289fb840d59a3b6a10f5
BPR
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/uk/cukg7ikh2js3f5lgwuhamf4rclidekibnzzkn54t6vyfir6hcsao.py # Topologically Sorted Source Nodes: [u, i, j, mul, x_ui, mul_1, x_uj, x_uij, log_sigmoid, log_prob, norm, pow_1, sum_4, norm_1, pow_2, sum_5, add, norm_2, pow_3, sum_6, add_1, regularization, neg, add_2], Original ATen: [aten.index, aten.mul, aten.sum, aten.sub, aten.log_sigmoid_forward, aten.linalg_vector_norm, aten.pow, aten.add, aten.neg] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # i => index_1 # j => index_2 # log_prob => sum_3 # log_sigmoid => abs_1, exp, full_default, log1p, minimum, neg, sub_1 # mul => mul # mul_1 => mul_1 # neg => neg_1 # norm => pow_1, pow_2, sum_4 # norm_1 => pow_4, pow_5, sum_6 # norm_2 => pow_7, pow_8, sum_8 # pow_1 => pow_3 # pow_2 => pow_6 # pow_3 => pow_9 # regularization => mul_2 # sum_4 => sum_5 # sum_5 => sum_7 # sum_6 => sum_9 # u => index # x_ui => sum_1 # x_uij => sub # x_uj => sum_2 # Graph fragment: # %index : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_1, [%primals_2]), kwargs = {}) # %index_1 : [num_users=2] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_3, [%primals_4]), kwargs = {}) # %index_2 : [num_users=2] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_3, [%primals_5]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %index_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %index_2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %sum_2), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %sub), kwargs = {}) # %abs_1 : 
[num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_1,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%index, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_3,), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%index_1, 2), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_4, [1]), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_6, 0.5), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_5, 2), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_6,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, %sum_7), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%index_2, 2), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [1]), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_8, 0.5), kwargs = {}) # %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_8, 2), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_9,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %sum_9), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_3,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_1, %mul_2), kwargs = {}) triton_per_fused_add_index_linalg_vector_norm_log_sigmoid_forward_mul_neg_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_index_linalg_vector_norm_log_sigmoid_forward_mul_neg_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], 
reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*i64', 4: '*fp32', 5: '*i64', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=(6,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_index_linalg_vector_norm_log_sigmoid_forward_mul_neg_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_index_linalg_vector_norm_log_sigmoid_forward_mul_neg_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp7 = tl.load(in_ptr2 + (r0), None) tmp26 = tl.load(in_ptr4 + (r0), None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (4*tmp4), None, eviction_policy='evict_last') tmp8 = tmp7 + tmp1 tmp9 = tmp7 < 0 tmp10 = tl.where(tmp9, tmp8, tmp7) tl.device_assert((0 <= tmp10) & (tmp10 < 4), "index out of bounds: 0 <= tmp10 < 4") tmp12 = tl.load(in_ptr3 + (4*tmp10), None, eviction_policy='evict_last') tmp13 = tmp6 * tmp12 tmp14 = tl.load(in_ptr1 + (1 + (4*tmp4)), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr3 + (1 + (4*tmp10)), None, eviction_policy='evict_last') tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tl.load(in_ptr1 + (2 + (4*tmp4)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr3 + (2 + (4*tmp10)), None, eviction_policy='evict_last') tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp22 = tl.load(in_ptr1 + (3 + (4*tmp4)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr3 + (3 + (4*tmp10)), None, eviction_policy='evict_last') tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp27 = tmp26 + tmp1 tmp28 = tmp26 < 0 tmp29 = tl.where(tmp28, tmp27, tmp26) tl.device_assert((0 <= tmp29) & (tmp29 < 4), "index out of bounds: 0 <= tmp29 < 4") tmp31 = tl.load(in_ptr3 + (4*tmp29), None, eviction_policy='evict_last') tmp32 = tmp6 * tmp31 tmp33 = tl.load(in_ptr3 + (1 + (4*tmp29)), None, eviction_policy='evict_last') tmp34 = tmp14 * tmp33 tmp35 = tmp32 + tmp34 tmp36 = tl.load(in_ptr3 + (2 + (4*tmp29)), None, eviction_policy='evict_last') tmp37 = tmp18 * tmp36 tmp38 = tmp35 + tmp37 tmp39 = tl.load(in_ptr3 + (3 + (4*tmp29)), None, eviction_policy='evict_last') tmp40 = tmp22 * tmp39 tmp41 = tmp38 + tmp40 tmp42 = tmp6 * tmp6 tmp43 = tmp14 * tmp14 tmp44 = tmp42 + tmp43 tmp45 = tmp18 * 
tmp18 tmp46 = tmp44 + tmp45 tmp47 = tmp22 * tmp22 tmp48 = tmp46 + tmp47 tmp49 = libdevice.sqrt(tmp48) tmp50 = tmp49 * tmp49 tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK]) tmp53 = tl.sum(tmp51, 1)[:, None] tmp54 = tmp12 * tmp12 tmp55 = tmp15 * tmp15 tmp56 = tmp54 + tmp55 tmp57 = tmp19 * tmp19 tmp58 = tmp56 + tmp57 tmp59 = tmp23 * tmp23 tmp60 = tmp58 + tmp59 tmp61 = libdevice.sqrt(tmp60) tmp62 = tmp61 * tmp61 tmp63 = tl.broadcast_to(tmp62, [XBLOCK, RBLOCK]) tmp65 = tl.sum(tmp63, 1)[:, None] tmp66 = tmp31 * tmp31 tmp67 = tmp33 * tmp33 tmp68 = tmp66 + tmp67 tmp69 = tmp36 * tmp36 tmp70 = tmp68 + tmp69 tmp71 = tmp39 * tmp39 tmp72 = tmp70 + tmp71 tmp73 = libdevice.sqrt(tmp72) tmp74 = tmp73 * tmp73 tmp75 = tl.broadcast_to(tmp74, [XBLOCK, RBLOCK]) tmp77 = tl.sum(tmp75, 1)[:, None] tmp78 = tmp25 - tmp41 tmp79 = 0.0 tmp80 = triton_helpers.minimum(tmp79, tmp78) tmp81 = tl_math.abs(tmp78) tmp82 = -tmp81 tmp83 = tl_math.exp(tmp82) tmp84 = libdevice.log1p(tmp83) tmp85 = tmp80 - tmp84 tmp86 = tl.broadcast_to(tmp85, [XBLOCK, RBLOCK]) tmp88 = tl.sum(tmp86, 1)[:, None] tmp89 = -tmp88 tmp90 = tmp53 + tmp65 tmp91 = tmp90 + tmp77 tmp92 = 4.0 tmp93 = tmp91 * tmp92 tmp94 = tmp89 + tmp93 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp94, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf6 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [u, i, j, mul, x_ui, mul_1, x_uj, x_uij, log_sigmoid, log_prob, norm, pow_1, sum_4, norm_1, pow_2, sum_5, add, norm_2, pow_3, sum_6, add_1, regularization, neg, add_2], Original ATen: [aten.index, aten.mul, aten.sum, aten.sub, aten.log_sigmoid_forward, aten.linalg_vector_norm, aten.pow, aten.add, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_add_index_linalg_vector_norm_log_sigmoid_forward_mul_neg_pow_sub_sum_0.run(buf6, primals_2, primals_1, primals_4, primals_3, primals_5, 1, 4, grid=grid(1), stream=stream0) return (buf6, primals_1, primals_2, primals_3, primals_4, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
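# The fused kernel above evaluates F.logsigmoid through the numerically
# stable identity log(sigmoid(z)) = min(0, z) - log1p(exp(-|z|)) (the
# minimum/abs/exp/log1p chain around tmp80-tmp85). A minimal eager-mode
# sketch of that identity, assuming only a stock PyTorch install:
import torch
import torch.nn.functional as F

z = torch.randn(1024) * 50  # large negative values would drive a naive log(sigmoid) to -inf
stable = torch.minimum(torch.zeros_like(z), z) - torch.log1p(torch.exp(-z.abs()))
assert torch.allclose(stable, F.logsigmoid(z))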
import torch
import torch.nn as nn
import torch.nn.functional as F


class BPR(nn.Module):

    def __init__(self, user_size, item_size, dim, weight_decay):
        super().__init__()
        self.W = nn.Parameter(torch.empty(user_size, dim))
        self.H = nn.Parameter(torch.empty(item_size, dim))
        nn.init.xavier_normal_(self.W.data)
        nn.init.xavier_normal_(self.H.data)
        self.weight_decay = weight_decay

    def forward(self, u, i, j):
        """Return loss value.

        Args:
            u(torch.LongTensor): tensor storing user indexes. [batch_size,]
            i(torch.LongTensor): tensor storing item indexes which are preferred by the user. [batch_size,]
            j(torch.LongTensor): tensor storing item indexes which are not preferred by the user. [batch_size,]

        Returns:
            torch.FloatTensor
        """
        u = self.W[u, :]
        i = self.H[i, :]
        j = self.H[j, :]
        x_ui = torch.mul(u, i).sum(dim=1)
        x_uj = torch.mul(u, j).sum(dim=1)
        x_uij = x_ui - x_uj
        log_prob = F.logsigmoid(x_uij).sum()
        regularization = self.weight_decay * (u.norm(dim=1).pow(2).sum() +
            i.norm(dim=1).pow(2).sum() + j.norm(dim=1).pow(2).sum())
        return -log_prob + regularization

    def recommend(self, u):
        """Return recommended item list given users.

        Args:
            u(torch.LongTensor): tensor storing user indexes. [batch_size,]

        Returns:
            pred(torch.LongTensor): recommended item list sorted by preference. [batch_size, item_size]
        """
        u = self.W[u, :]
        x_ui = torch.mm(u, self.H.t())
        pred = torch.argsort(x_ui, dim=1)
        return pred


def get_inputs():
    return [torch.ones([4], dtype=torch.int64), torch.ones([4], dtype=torch.int64),
        torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {'user_size': 4, 'item_size': 4, 'dim': 4, 'weight_decay': 4}]
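# A minimal usage sketch for the eager-mode module above; the sizes mirror
# get_init_inputs() and are purely illustrative:
import torch

model = BPR(user_size=4, item_size=4, dim=4, weight_decay=4)
u = torch.zeros(4, dtype=torch.long)  # user indexes
i = torch.zeros(4, dtype=torch.long)  # positively observed item indexes
j = torch.ones(4, dtype=torch.long)   # negatively sampled item indexes
loss = model(u, i, j)                 # scalar: -log_prob + L2 regularization
loss.backward()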
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_index_linalg_vector_norm_log_sigmoid_forward_mul_neg_pow_sub_sum_0( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp7 = tl.load(in_ptr2 + r0, None) tmp26 = tl.load(in_ptr4 + r0, None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + 4 * tmp4, None, eviction_policy='evict_last') tmp8 = tmp7 + tmp1 tmp9 = tmp7 < 0 tmp10 = tl.where(tmp9, tmp8, tmp7) tl.device_assert((0 <= tmp10) & (tmp10 < 4), 'index out of bounds: 0 <= tmp10 < 4') tmp12 = tl.load(in_ptr3 + 4 * tmp10, None, eviction_policy='evict_last') tmp13 = tmp6 * tmp12 tmp14 = tl.load(in_ptr1 + (1 + 4 * tmp4), None, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr3 + (1 + 4 * tmp10), None, eviction_policy= 'evict_last') tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tl.load(in_ptr1 + (2 + 4 * tmp4), None, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr3 + (2 + 4 * tmp10), None, eviction_policy= 'evict_last') tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp22 = tl.load(in_ptr1 + (3 + 4 * tmp4), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr3 + (3 + 4 * tmp10), None, eviction_policy= 'evict_last') tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp27 = tmp26 + tmp1 tmp28 = tmp26 < 0 tmp29 = tl.where(tmp28, tmp27, tmp26) tl.device_assert((0 <= tmp29) & (tmp29 < 4), 'index out of bounds: 0 <= tmp29 < 4') tmp31 = tl.load(in_ptr3 + 4 * tmp29, None, eviction_policy='evict_last') tmp32 = tmp6 * tmp31 tmp33 = tl.load(in_ptr3 + (1 + 4 * tmp29), None, eviction_policy= 'evict_last') tmp34 = tmp14 * tmp33 tmp35 = tmp32 + tmp34 tmp36 = tl.load(in_ptr3 + (2 + 4 * tmp29), None, eviction_policy= 'evict_last') tmp37 = tmp18 * tmp36 tmp38 = tmp35 + tmp37 tmp39 = tl.load(in_ptr3 + (3 + 4 * tmp29), None, eviction_policy= 'evict_last') tmp40 = tmp22 * tmp39 tmp41 = tmp38 + tmp40 tmp42 = tmp6 * tmp6 tmp43 = tmp14 * tmp14 tmp44 = tmp42 + tmp43 tmp45 = tmp18 * tmp18 tmp46 = tmp44 + tmp45 tmp47 = tmp22 * tmp22 tmp48 = tmp46 + tmp47 tmp49 = libdevice.sqrt(tmp48) tmp50 = tmp49 * tmp49 tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK]) tmp53 = tl.sum(tmp51, 1)[:, None] tmp54 = tmp12 * tmp12 tmp55 = tmp15 * tmp15 tmp56 = tmp54 + tmp55 tmp57 = tmp19 * tmp19 tmp58 = tmp56 + tmp57 tmp59 = tmp23 * tmp23 tmp60 = tmp58 + tmp59 tmp61 = libdevice.sqrt(tmp60) tmp62 = tmp61 * tmp61 tmp63 = tl.broadcast_to(tmp62, [XBLOCK, RBLOCK]) tmp65 = tl.sum(tmp63, 1)[:, None] tmp66 = tmp31 * tmp31 tmp67 = tmp33 * tmp33 tmp68 = tmp66 + tmp67 tmp69 = tmp36 * tmp36 tmp70 = tmp68 + tmp69 tmp71 = tmp39 * tmp39 tmp72 = tmp70 + tmp71 tmp73 = libdevice.sqrt(tmp72) tmp74 = tmp73 * tmp73 tmp75 = 
tl.broadcast_to(tmp74, [XBLOCK, RBLOCK]) tmp77 = tl.sum(tmp75, 1)[:, None] tmp78 = tmp25 - tmp41 tmp79 = 0.0 tmp80 = triton_helpers.minimum(tmp79, tmp78) tmp81 = tl_math.abs(tmp78) tmp82 = -tmp81 tmp83 = tl_math.exp(tmp82) tmp84 = libdevice.log1p(tmp83) tmp85 = tmp80 - tmp84 tmp86 = tl.broadcast_to(tmp85, [XBLOCK, RBLOCK]) tmp88 = tl.sum(tmp86, 1)[:, None] tmp89 = -tmp88 tmp90 = tmp53 + tmp65 tmp91 = tmp90 + tmp77 tmp92 = 4.0 tmp93 = tmp91 * tmp92 tmp94 = tmp89 + tmp93 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp94, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf6 = buf2 del buf2 get_raw_stream(0) triton_per_fused_add_index_linalg_vector_norm_log_sigmoid_forward_mul_neg_pow_sub_sum_0[ grid(1)](buf6, primals_2, primals_1, primals_4, primals_3, primals_5, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) return buf6, primals_1, primals_2, primals_3, primals_4, primals_5 class BPRNew(nn.Module): def __init__(self, user_size, item_size, dim, weight_decay): super().__init__() self.W = nn.Parameter(torch.empty(user_size, dim)) self.H = nn.Parameter(torch.empty(item_size, dim)) nn.init.xavier_normal_(self.W.data) nn.init.xavier_normal_(self.H.data) self.weight_decay = weight_decay def recommend(self, u): """Return recommended item list given users. Args: u(torch.LongTensor): tensor storing user indexes. [batch_size,] Returns: pred(torch.LongTensor): recommended item list sorted by preference. [batch_size, item_size] """ u = self.W[u, :] x_ui = torch.mm(u, self.H.t()) pred = torch.argsort(x_ui, dim=1) return pred def forward(self, input_0, input_1, input_2): primals_1 = self.W primals_3 = self.H primals_2 = input_0 primals_4 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
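# A hypothetical smoke test for the compiled module, assuming a CUDA device
# (call() pins cuda:0 and asserts the traced tensor sizes). Note that the
# trace bakes the regularization coefficient in as the literal 4.0 (tmp92
# in the kernel), so the stored self.weight_decay is not consulted at run time:
model = BPRNew(user_size=4, item_size=4, dim=4, weight_decay=4).cuda()
idx = torch.zeros(4, dtype=torch.int64, device='cuda')
loss = model(idx, idx, idx)  # output[0] of call(): the fused scalar loss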
kerengaiger/bpr
BPR
false
12,662
[ "MIT" ]
0
66bfa57469a9c70ba5b9158fde5210abe1bd8d7b
https://github.com/kerengaiger/bpr/tree/66bfa57469a9c70ba5b9158fde5210abe1bd8d7b
SimulatorReward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/5j/c5ji4mfxenghd3ccczky5osir42aijmeisydrv7ufxv2edv4ktf6.py # Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/l5/cl567j2pbsgcv6nt7ux6tlu6b7q2zlzodpyhx5vc6sfgkf4lnrcu.py # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/om/comxhxljgusfvchwfatyy3zlyu63ectx7cc43vhpq4slnbeelc4a.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_3 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton 
import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hx/chxm27gfgqrmlk4dtntfyb4eusceejl3zp6b5gugzys3ftbpi4v5.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_5 => relu_2 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_3 = async_compile.triton('triton_poi_fused_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': 
True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5d/c5dvgve24hbvrdcz5w4qrfj4r34h3se3x2w5fculeyheewlhp45h.py # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_6 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3y/c3yxwuwmqvwyaal5sg5yv4hb66vvtdtpd7juzykvfeirynnm6y36.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = 
call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 3) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (3*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (3*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (3*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp0 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tmp1 - tmp5 tmp9 = tl_math.exp(tmp8) tmp10 = tmp2 - tmp5 tmp11 = tl_math.exp(tmp10) tmp12 = tmp9 + tmp11 tmp13 = tmp4 - tmp5 tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp16 = tmp7 / tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (8, ), (1, )) assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (200, 512), (512, 1)) 
assert_size_stride(primals_9, (200, ), (1, )) assert_size_stride(primals_10, (100, 200), (200, 1)) assert_size_stride(primals_11, (100, ), (1, )) assert_size_stride(primals_12, (3, 100), (100, 1)) assert_size_stride(primals_13, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 512, grid=grid(512), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 4, 4), (256, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 1024, grid=grid(1024), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 4, 4), (512, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_7, 2048, grid=grid(2048), stream=stream0) del primals_7 buf6 = empty_strided_cuda((4, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (4, 512), (512, 1), 0), reinterpret_tensor(primals_8, (512, 200), (1, 512), 0), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] triton_poi_fused_relu_3.run(buf7, primals_9, 800, grid=grid(800), stream=stream0) del primals_9 buf8 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf7, reinterpret_tensor(primals_10, (200, 100), (1, 200), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_11, 400, grid=grid(400), stream=stream0) del primals_11 buf10 = empty_strided_cuda((4, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf9, reinterpret_tensor(primals_12, (100, 3), (1, 100), 0), alpha=1, beta=1, out=buf10) del primals_13 buf11 = empty_strided_cuda((4, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf10, buf11, 12, grid=grid(12), stream=stream0) del buf10 return (buf11, primals_2, primals_4, primals_6, primals_1, buf1, buf3, reinterpret_tensor(buf5, (4, 512), (512, 1), 0), buf7, buf9, buf11, primals_12, primals_10, primals_8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from 
torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((200, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((100, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((3, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
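# triton_poi_fused__softmax_5 above applies the standard max-shift trick:
# softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))), which keeps exp from
# overflowing. A small eager-mode sketch of the same computation:
import torch

x = torch.randn(4, 3) * 100
shifted = x - x.max(dim=1, keepdim=True).values
ref = shifted.exp() / shifted.exp().sum(dim=1, keepdim=True)
assert torch.allclose(ref, torch.softmax(x, dim=1))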
import torch
import torch.nn.functional as F


class SimulatorReward(torch.nn.Module):

    def __init__(self):
        super(SimulatorReward, self).__init__()
        self.conv1 = torch.nn.Conv2d(4, 8, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(8, 16, kernel_size=3, padding=1)
        self.conv3 = torch.nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.fc1 = torch.nn.Linear(512, 200)
        self.fc2 = torch.nn.Linear(200, 100)
        self.fc3 = torch.nn.Linear(100, 3)

    def forward(self, x):
        x = x.reshape(-1, 4, 4, 4)
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = self.conv3(x)
        x = x.view(-1, 512)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        # explicit dim matches the implicit choice for 2D input and avoids
        # the implicit-dim deprecation warning
        return F.softmax(x, dim=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
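# A minimal usage sketch; any input that reshapes to (-1, 4, 4, 4) works,
# and each output row is a 3-way probability distribution:
import torch

model = SimulatorReward()
probs = model(torch.rand(4, 4, 4, 4))  # -> (4, 3)
assert torch.allclose(probs.sum(dim=1), torch.ones(4))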
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 3 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 3 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 3 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 3 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp0 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tmp1 - 
tmp5 tmp9 = tl_math.exp(tmp8) tmp10 = tmp2 - tmp5 tmp11 = tl_math.exp(tmp10) tmp12 = tmp9 + tmp11 tmp13 = tmp4 - tmp5 tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp16 = tmp7 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (200, 512), (512, 1)) assert_size_stride(primals_9, (200,), (1,)) assert_size_stride(primals_10, (100, 200), (200, 1)) assert_size_stride(primals_11, (100,), (1,)) assert_size_stride(primals_12, (3, 100), (100, 1)) assert_size_stride(primals_13, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(512)](buf1, primals_3, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 4, 4), (256, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(1024)](buf3, primals_5, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 4, 4), (512, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(2048)](buf5, primals_7, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (4, 512), (512, 1), 0), reinterpret_tensor(primals_8, (512, 200), (1, 512), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_relu_3[grid(800)](buf7, primals_9, 800, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf7, reinterpret_tensor(primals_10, (200, 100), (1, 200), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(400)](buf9, primals_11, 400, XBLOCK= 256, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((4, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_13, buf9, reinterpret_tensor( primals_12, (100, 3), (1, 100), 0), alpha=1, beta=1, out=buf10) del primals_13 buf11 = empty_strided_cuda((4, 3), (3, 1), torch.float32) triton_poi_fused__softmax_5[grid(12)](buf10, buf11, 12, XBLOCK=16, num_warps=1, num_stages=1) del buf10 return (buf11, primals_2, primals_4, primals_6, primals_1, buf1, buf3, reinterpret_tensor(buf5, (4, 512), (512, 1), 0), buf7, buf9, buf11, primals_12, primals_10, primals_8) class SimulatorRewardNew(torch.nn.Module): def __init__(self): super(SimulatorRewardNew, self).__init__() self.conv1 = torch.nn.Conv2d(4, 8, kernel_size=3, padding=1) self.conv2 = 
torch.nn.Conv2d(8, 16, kernel_size=3, padding=1) self.conv3 = torch.nn.Conv2d(16, 32, kernel_size=3, padding=1) self.fc1 = torch.nn.Linear(512, 200) self.fc2 = torch.nn.Linear(200, 100) self.fc3 = torch.nn.Linear(100, 3) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.fc2.weight primals_11 = self.fc2.bias primals_12 = self.fc3.weight primals_13 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
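# A hypothetical smoke test, assuming a CUDA device; unlike the eager module,
# the compiled wrapper asserts the exact (4, 4, 4, 4) input shape and strides
# it was traced with, so other batch sizes are rejected:
model = SimulatorRewardNew().cuda()
probs = model(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 3) on cuda:0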
karshtharyani/DeepReinforcementLearningInAction
SimulatorReward
false
12,663
[ "MIT" ]
0
9dc40a43b43f05daf9aecb7e3ec7592cf38720e5
https://github.com/karshtharyani/DeepReinforcementLearningInAction/tree/9dc40a43b43f05daf9aecb7e3ec7592cf38720e5
UnpoolingAsConvolution
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ca/ccacj4wpx7z2m7f2urwr37plqbnkovuvco2er2l6bllg7a7ckqai.py # Topologically Sorted Source Nodes: [padded_b], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # padded_b => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_3, [1, 1, 0, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 5 x0 = xindex % 6 x2 = (xindex // 30) x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = (-1) + 
x0 tmp4 = tl.full([1], 0, tl.int64) tmp5 = tmp3 >= tmp4 tmp6 = tmp3 < tmp1 tmp7 = tmp2 & tmp5 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1) + (16*x2)), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zx/czx2juz5rjbhbiirukrz6d6kx7jco6dqgpkpvvc7m75kxtwkey4h.py # Topologically Sorted Source Nodes: [padded_c], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # padded_c => constant_pad_nd_1 # Graph fragment: # %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_3, [0, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 5) % 6 x0 = xindex % 5 x2 = (xindex // 30) x3 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x0 tmp6 = tmp5 < tmp3 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1) + (16*x2)), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/cd/ccd5s4fpd3lxf637fdxhtl6drfxl33ps3hjs7hw2ls5lojfpirc2.py # Topologically Sorted Source Nodes: [padded_d], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # padded_d => constant_pad_nd_2 # Graph fragment: # %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_3, [0, 1, 0, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_2 = async_compile.triton('triton_poi_fused_constant_pad_nd_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 5) % 5 x0 = xindex % 5 x2 = (xindex // 25) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7k/c7kzycclc2im22az3rwdu4aimn2tldhwgz27hjvwwhc2sfhyjnh2.py # Topologically Sorted Source Nodes: [stacked_2], Original ATen: [aten.stack] # Source node to ATen node mapping: # stacked_2 => cat_2 # Graph fragment: # %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], 4), kwargs = {}) triton_poi_fused_stack_3 = async_compile.triton('triton_poi_fused_stack_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) % 4 x2 = (xindex // 8) % 8 x5 = (xindex // 64) x3 = (xindex // 64) % 4 x6 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x1 + (4*(x2 % 2)) tmp6 = tmp5 >= tmp1 tmp7 = tl.full([1], 4, tl.int64) tmp8 = tmp5 < tmp7 tmp9 = tmp8 & tmp4 tmp10 = tl.load(in_ptr0 + ((4*(x2 // 2)) + (16*x5) + (x1 + (4*(x2 % 2)))), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr1 + (x3), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp5 >= tmp7 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp5 < tmp16 tmp18 = tmp15 & tmp4 tmp19 = tl.load(in_ptr2 + ((4*(x2 // 2)) + (16*x5) + ((-4) + x1 + (4*(x2 % 2)))), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr3 + (x3), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 + tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp18, tmp21, tmp22) tmp24 = tl.where(tmp8, tmp14, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp4, tmp24, tmp25) tmp27 = tmp0 >= tmp3 tmp28 = tl.full([1], 2, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp8 & tmp27 tmp31 = tl.load(in_ptr4 + ((4*(x2 // 2)) + (16*x5) + (x1 + (4*(x2 % 2)))), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tl.load(in_ptr5 + (x3), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp33 = tmp31 + tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp30, tmp33, tmp34) tmp36 = tmp15 & tmp27 tmp37 = tl.load(in_ptr6 + ((4*(x2 // 2)) + (16*x5) + ((-4) + x1 + (4*(x2 % 2)))), tmp36 & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (x3), tmp36 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tmp37 + tmp38 tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp36, tmp39, tmp40) tmp42 = tl.where(tmp8, tmp35, tmp41) tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp27, tmp42, tmp43) tmp45 = tl.where(tmp4, tmp26, tmp44) tl.store(out_ptr0 + (x6), tmp45, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 2, 3), (24, 6, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3, 2), (24, 6, 2, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [output_a], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, 
bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [padded_b], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_3, buf1, 480, grid=grid(480), stream=stream0) # Topologically Sorted Source Nodes: [output_b], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = empty_strided_cuda((4, 4, 6, 5), (120, 30, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [padded_c], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_1.run(primals_3, buf3, 480, grid=grid(480), stream=stream0) # Topologically Sorted Source Nodes: [output_c], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [padded_d], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_2.run(primals_3, buf5, 400, grid=grid(400), stream=stream0) # Topologically Sorted Source Nodes: [output_d], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 8, 4, 2), (256, 64, 8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [stacked_2], Original ATen: [aten.stack] triton_poi_fused_stack_3.run(buf0, primals_2, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 1024, grid=grid(1024), stream=stream0) del buf0 del buf2 del buf4 del buf6 del primals_2 del primals_5 del primals_7 del primals_9 return (reinterpret_tensor(buf7, (4, 4, 8, 8), (256, 64, 8, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 2, 3), (24, 6, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 2), (24, 6, 2, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
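A minimal driver sketch for the generated wrapper above (an illustration, not part of the emitted file): `call` consumes the four convolution weight/bias pairs plus the input, in the order checked by the `assert_size_stride` guards, and returns the upsampled tensor first. A CUDA device is assumed.

import torch

# Hypothetical inputs; shapes and (contiguous) strides mirror the guards inside call().
args = [
    torch.randn(4, 4, 3, 3, device='cuda'),  # conv_A weight
    torch.randn(4, device='cuda'),           # conv_A bias
    torch.rand(4, 4, 4, 4, device='cuda'),   # input feature map
    torch.randn(4, 4, 2, 3, device='cuda'),  # conv_B weight
    torch.randn(4, device='cuda'),           # conv_B bias
    torch.randn(4, 4, 3, 2, device='cuda'),  # conv_C weight
    torch.randn(4, device='cuda'),           # conv_C bias
    torch.randn(4, 4, 2, 2, device='cuda'),  # conv_D weight
    torch.randn(4, device='cuda'),           # conv_D bias
]
out = call(args)[0]  # note: call() clears the args list
print(out.shape)     # torch.Size([4, 4, 8, 8]): both spatial dims doubled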
import torch import torch.nn as nn def get_incoming_shape(incoming): size = incoming.size() return [size[0], size[1], size[2], size[3]] def interleave(tensors, axis): old_shape = get_incoming_shape(tensors[0])[1:] new_shape = [-1] + old_shape new_shape[axis] *= len(tensors) stacked = torch.stack(tensors, axis + 1) reshaped = stacked.view(new_shape) return reshaped class UnpoolingAsConvolution(nn.Module): def __init__(self, inplanes, planes): super(UnpoolingAsConvolution, self).__init__() self.conv_A = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(3, 3), stride=1, padding=1) self.conv_B = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(2, 3), stride=1, padding=0) self.conv_C = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(3, 2), stride=1, padding=0) self.conv_D = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(2, 2), stride=1, padding=0) def forward(self, x): output_a = self.conv_A(x) padded_b = nn.functional.pad(x, (1, 1, 0, 1)) output_b = self.conv_B(padded_b) padded_c = nn.functional.pad(x, (0, 1, 1, 1)) output_c = self.conv_C(padded_c) padded_d = nn.functional.pad(x, (0, 1, 0, 1)) output_d = self.conv_D(padded_d) left = interleave([output_a, output_b], axis=2) right = interleave([output_c, output_d], axis=2) y = interleave([left, right], axis=3) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
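A short usage sketch for the eager module above (assumed, not from the source): the four differently-shaped convolutions each produce an H x W map, and the two `interleave` passes weave them into a 2H x 2W output, so the module acts as a learned 2x upsampler.

import torch

up = UnpoolingAsConvolution(inplanes=4, planes=4)
x = torch.rand(4, 4, 4, 4)  # (N, C, H, W)
y = up(x)
print(y.shape)  # torch.Size([4, 4, 8, 8]) -- H and W doubled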
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 5 x0 = xindex % 6 x2 = xindex // 30 x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = -1 + x0 tmp4 = tl.full([1], 0, tl.int64) tmp5 = tmp3 >= tmp4 tmp6 = tmp3 < tmp1 tmp7 = tmp2 & tmp5 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 5 % 6 x0 = xindex % 5 x2 = xindex // 30 x3 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x0 tmp6 = tmp5 < tmp3 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 5 % 5 x0 = xindex % 5 x2 = xindex // 25 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 4 x2 = xindex // 8 % 8 x5 = xindex // 64 x3 = xindex // 64 % 4 x6 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x1 + 4 * (x2 % 2) tmp7 = tl.full([1], 4, tl.int64) tmp8 = tmp5 < tmp7 tmp9 = tmp8 & tmp4 tmp10 = tl.load(in_ptr0 + (4 * (x2 // 2) + 16 * x5 + (x1 + 4 * (x2 % 2) )), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr1 + x3, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp5 >= tmp7 tl.full([1], 8, tl.int64) tmp18 = tmp15 & tmp4 tmp19 = tl.load(in_ptr2 + (4 * (x2 // 2) + 16 * x5 + (-4 + x1 + 4 * (x2 % 2))), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr3 + x3, tmp18 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tmp19 + tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp18, tmp21, tmp22) tmp24 = tl.where(tmp8, tmp14, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp4, tmp24, tmp25) tmp27 = 
tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp30 = tmp8 & tmp27 tmp31 = tl.load(in_ptr4 + (4 * (x2 // 2) + 16 * x5 + (x1 + 4 * (x2 % 2) )), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tl.load(in_ptr5 + x3, tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp33 = tmp31 + tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp30, tmp33, tmp34) tmp36 = tmp15 & tmp27 tmp37 = tl.load(in_ptr6 + (4 * (x2 // 2) + 16 * x5 + (-4 + x1 + 4 * (x2 % 2))), tmp36 & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + x3, tmp36 & xmask, eviction_policy= 'evict_last', other=0.0) tmp39 = tmp37 + tmp38 tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp36, tmp39, tmp40) tmp42 = tl.where(tmp8, tmp35, tmp41) tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp27, tmp42, tmp43) tmp45 = tl.where(tmp4, tmp26, tmp44) tl.store(out_ptr0 + x6, tmp45, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 2, 3), (24, 6, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 2), (24, 6, 2, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(480)](primals_3, buf1, 480, XBLOCK=256, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = empty_strided_cuda((4, 4, 6, 5), (120, 30, 5, 1), torch.float32) triton_poi_fused_constant_pad_nd_1[grid(480)](primals_3, buf3, 480, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) triton_poi_fused_constant_pad_nd_2[grid(400)](primals_3, buf5, 400, XBLOCK=256, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 8, 4, 2), (256, 64, 8, 2, 1), torch.float32) triton_poi_fused_stack_3[grid(1024)](buf0, primals_2, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 1024, XBLOCK =128, num_warps=4, num_stages=1) del buf0 del buf2 del buf4 del buf6 del primals_2 del primals_5 del primals_7 del primals_9 return reinterpret_tensor(buf7, (4, 4, 8, 8), (256, 64, 8, 1), 0 ), primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5 def get_incoming_shape(incoming): size = incoming.size() return 
[size[0], size[1], size[2], size[3]] def interleave(tensors, axis): old_shape = get_incoming_shape(tensors[0])[1:] new_shape = [-1] + old_shape new_shape[axis] *= len(tensors) stacked = torch.stack(tensors, axis + 1) reshaped = stacked.view(new_shape) return reshaped class UnpoolingAsConvolutionNew(nn.Module): def __init__(self, inplanes, planes): super(UnpoolingAsConvolutionNew, self).__init__() self.conv_A = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(3, 3), stride=1, padding=1) self.conv_B = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(2, 3), stride=1, padding=0) self.conv_C = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(3, 2), stride=1, padding=0) self.conv_D = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(2, 2), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv_A.weight primals_2 = self.conv_A.bias primals_4 = self.conv_B.weight primals_5 = self.conv_B.bias primals_6 = self.conv_C.weight primals_7 = self.conv_C.bias primals_8 = self.conv_D.weight primals_9 = self.conv_D.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
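A hedged parity check between the compiled module above and the eager `UnpoolingAsConvolution` defined earlier (assumes a CUDA device, since the generated kernels are CUDA-only):

import torch

ref = UnpoolingAsConvolution(4, 4).cuda()
opt = UnpoolingAsConvolutionNew(4, 4).cuda()
opt.load_state_dict(ref.state_dict())  # same submodule names, so this maps 1:1
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(ref(x), opt(x), atol=1e-5))  # expected: True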
karoly-hars/DE_hybrid_CNN
UnpoolingAsConvolution
false
12664
[ "BSD-3-Clause" ]
0
d74ba4291d6db335151d5262ab96e8e3806a7587
https://github.com/karoly-hars/DE_hybrid_CNN/tree/d74ba4291d6db335151d5262ab96e8e3806a7587
ActorCriticMLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/g5/cg5f2rptqnpi2mrqpqc4tujqpbrrrjrse6plhgftx425znsffpfv.py # Topologically Sorted Source Nodes: [a], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # a => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/yh/cyhogxneodczl7mcnuf7mkhxldvr2nc5wj5e42agntthff4e45p7.py # Topologically Sorted Source Nodes: [a], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # a => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (128, 4), (4, 1)) assert_size_stride(primals_3, (128, ), (1, )) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 128), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse 
buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf7, 8192, grid=grid(8192), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [a], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [a], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [c], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_7 return (buf4, reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf4, primals_6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
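The two `_log_softmax` kernels above split log-softmax into its standard numerically stable form: the first subtracts the per-row maximum, the second subtracts the log of the summed exponentials of the shifted values. An eager-mode sketch of the same two steps (illustration only, not the generated code):

import torch

x = torch.randn(4, 4)
shifted = x - x.amax(dim=-1, keepdim=True)                  # kernel 1: subtract row max
out = shifted - shifted.exp().sum(-1, keepdim=True).log()   # kernel 2: subtract log-sum-exp
print(torch.allclose(out, torch.log_softmax(x, dim=-1)))    # True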
import torch from torch import Tensor from torch import nn from typing import Tuple from torch.nn import functional as F class ActorCriticMLP(nn.Module): """MLP network with heads for actor and critic.""" def __init__(self, input_shape: 'Tuple[int]', n_actions: 'int', hidden_size: 'int'=128): """ Args: input_shape: observation shape of the environment n_actions: number of discrete actions available in the environment hidden_size: size of hidden layers """ super().__init__() self.fc1 = nn.Linear(input_shape[0], hidden_size) self.actor_head = nn.Linear(hidden_size, n_actions) self.critic_head = nn.Linear(hidden_size, 1) def forward(self, x) ->Tuple[Tensor, Tensor]: """Forward pass through network. Calculates the action logits and the value. Args: x: input to network Returns: action log probs (logits), value """ x = F.relu(self.fc1(x.float())) a = F.log_softmax(self.actor_head(x), dim=-1) c = self.critic_head(x) return a, c def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_shape': [4, 4], 'n_actions': 4}]
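A brief usage sketch (assumed): since the actor head returns log-probabilities, actions can be sampled directly via `Categorical(logits=...)`, which accepts log-space scores.

import torch

net = ActorCriticMLP(input_shape=[4, 4], n_actions=4)
obs = torch.rand(8, 4)                 # a batch of 8 observations
log_probs, value = net(obs)
dist = torch.distributions.Categorical(logits=log_probs)
action = dist.sample()
print(action.shape, value.shape)       # torch.Size([8]) torch.Size([8, 1])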
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from typing import Tuple assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (128, 4), (4, 1)) assert_size_stride(primals_3, (128,), (1,)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 128), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_3, buf7, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf3 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_7 return buf4, reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), buf4, primals_6, primals_4, buf7 class ActorCriticMLPNew(nn.Module): """MLP network with heads for actor and critic.""" def __init__(self, input_shape: 'Tuple[int]', n_actions: 'int', hidden_size: 'int'=128): """ Args: input_shape: observation shape of the environment n_actions: number of discrete actions available in the environment hidden_size: size of hidden layers """ super().__init__() self.fc1 = nn.Linear(input_shape[0], hidden_size) self.actor_head = nn.Linear(hidden_size, n_actions) self.critic_head = nn.Linear(hidden_size, 1) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.actor_head.weight primals_5 = self.actor_head.bias primals_6 = self.critic_head.weight primals_7 = self.critic_head.bias primals_1 = input_0 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
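As with the unpooling pair, a hedged parity check of the compiled module against the eager `ActorCriticMLP` defined earlier (assumes CUDA):

import torch

ref = ActorCriticMLP(input_shape=[4, 4], n_actions=4).cuda()
opt = ActorCriticMLPNew(input_shape=[4, 4], n_actions=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    a_ref, c_ref = ref(x)
    a_opt, c_opt = opt(x)
print(torch.allclose(a_ref, a_opt, atol=1e-5),
      torch.allclose(c_ref, c_opt, atol=1e-5))  # expected: True True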
kfirgedal/lightning-bolts
ActorCriticMLP
false
12665
[ "Apache-2.0" ]
0
cbb8b6c21ca1de757d0f289fb840d59a3b6a10f5
https://github.com/kfirgedal/lightning-bolts/tree/cbb8b6c21ca1de757d0f289fb840d59a3b6a10f5
SDNE_layer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/jo/cjoedcmy7gkzotfopp7atueg5hlb65rgyaj7o2wkgwedzdx5r26r.py # Topologically Sorted Source Nodes: [t0], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # t0 => gt, mul, where # Graph fragment: # %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_2), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_2, 0.01), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_2, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zc/czc7u2dq2vtql25zagxwmpr4hs6rlgasv7bt6hnyjlwyehn7iert.py # Topologically Sorted Source Nodes: [t0_3, sub, mul_1, mul_2, mul_5, L_2nd], Original ATen: [aten.leaky_relu, aten.sub, aten.mul, aten.sum] # Source node to ATen node mapping: # L_2nd => sum_2 # mul_1 => mul_5 # mul_2 => mul_6 # mul_5 => mul_9 # sub => sub # t0_3 => gt_3, mul_3, where_3 # Graph fragment: # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_3, 0.01), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %addmm_3, %mul_3), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %where_3), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, 4), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %mul_6), kwargs = {}) # %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul_9,), kwargs = {}) triton_per_fused_leaky_relu_mul_sub_sum_1 = async_compile.triton('triton_per_fused_leaky_relu_mul_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_leaky_relu_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_leaky_relu_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, 
XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = 0.0 tmp3 = tmp1 > tmp2 tmp4 = 0.01 tmp5 = tmp1 * tmp4 tmp6 = tl.where(tmp3, tmp1, tmp5) tmp7 = tmp0 - tmp6 tmp8 = tmp7 * tmp0 tmp9 = 4.0 tmp10 = tmp8 * tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/sj/csjofojkruavhzqz4olfzmhyc3z7nkdnkvkw2mil5b5g3poqp465.py # Topologically Sorted Source Nodes: [trace, L_1st, mul_30, mul_31, add_9], Original ATen: [aten.trace, aten.mul, aten.add] # Source node to ATen node mapping: # L_1st => mul_4 # add_9 => add_16 # mul_30 => mul_34 # mul_31 => mul_35 # trace => diagonal_copy, sum_1 # Graph fragment: # %diagonal_copy : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%mm_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%diagonal_copy,), kwargs = {}) # %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, 4), kwargs = {}) # %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, 4), kwargs = {}) # %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_35, %sum_2), kwargs = {}) triton_per_fused_add_mul_trace_2 = async_compile.triton('triton_per_fused_add_mul_trace_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_trace_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_trace_2(in_ptr0, in_ptr1, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (5*r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (0)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = 4.0 tmp7 = tmp5 * tmp6 tmp10 = tmp7 + tmp9 tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp7, None) tl.store(out_ptr2 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pm/cpmcm6vae4i5kfw32dc5glavrbpkpf57x2zxlc24hgiogzll476e.py # Topologically Sorted Source Nodes: [abs_2, sum_4, mul_10, sum_5], Original ATen: [aten.abs, aten.sum, aten.mul] # Source node to ATen node mapping: # abs_2 => abs_2 # mul_10 => mul_14 # sum_4 => sum_5 # sum_5 => sum_6 # Graph fragment: # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%primals_2,), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_2,), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_2), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_14,), kwargs = {}) triton_per_fused_abs_mul_sum_3 = async_compile.triton('triton_per_fused_abs_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mul_sum_3(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = 
tl.sum(tmp2, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/e4/ce4hfyn5vohuqnghdltzoskekpfblvr522oitv6ouzcvnumqzc64.py # Topologically Sorted Source Nodes: [abs_1, sum_2, mul_6, mul_7, sum_3, mul_8, add, L_reg, mul_9, mul_11, add_2, L_reg_1, abs_3, sum_6, mul_12, mul_13, sum_7, mul_14, add_3, L_reg_2, mul_15, mul_17, add_4, L_reg_3, abs_5, sum_10, mul_18, mul_19, sum_11, mul_20, add_5, L_reg_4, mul_21, mul_23, add_6, L_reg_5, abs_7, sum_14, mul_24, mul_25, sum_15, mul_26, add_7, L_reg_6, mul_27, mul_29, add_8, L_reg_7], Original ATen: [aten.abs, aten.sum, aten.mul, aten.add] # Source node to ATen node mapping: # L_reg => add_1 # L_reg_1 => add_3 # L_reg_2 => add_5 # L_reg_3 => add_7 # L_reg_4 => add_9 # L_reg_5 => add_11 # L_reg_6 => add_13 # L_reg_7 => add_15 # abs_1 => abs_1 # abs_3 => abs_3 # abs_5 => abs_5 # abs_7 => abs_7 # add => add # add_2 => add_2 # add_3 => add_4 # add_4 => add_6 # add_5 => add_8 # add_6 => add_10 # add_7 => add_12 # add_8 => add_14 # mul_11 => mul_15 # mul_12 => mul_16 # mul_13 => mul_17 # mul_14 => mul_18 # mul_15 => mul_19 # mul_17 => mul_21 # mul_18 => mul_22 # mul_19 => mul_23 # mul_20 => mul_24 # mul_21 => mul_25 # mul_23 => mul_27 # mul_24 => mul_28 # mul_25 => mul_29 # mul_26 => mul_30 # mul_27 => mul_31 # mul_29 => mul_33 # mul_6 => mul_10 # mul_7 => mul_11 # mul_8 => mul_12 # mul_9 => mul_13 # sum_10 => sum_11 # sum_11 => sum_12 # sum_14 => sum_15 # sum_15 => sum_16 # sum_2 => sum_3 # sum_3 => sum_4 # sum_6 => sum_7 # sum_7 => sum_8 # Graph fragment: # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%primals_1,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_1,), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 4), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_1), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_11,), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, %mul_12), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 0), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_5, 4), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, 4), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_13, %mul_15), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %add_2), kwargs = {}) # %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%primals_4,), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_3,), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_7, 4), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %primals_4), 
kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_17,), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_8, 4), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_16, %mul_18), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %add_4), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_9, 4), kwargs = {}) # %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_10, 4), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, %mul_21), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_6), kwargs = {}) # %abs_5 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%primals_6,), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_5,), kwargs = {}) # %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_11, 4), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %primals_6), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_23,), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_12, 4), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_22, %mul_24), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %add_8), kwargs = {}) # %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_13, 4), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_14, 4), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_25, %mul_27), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %add_10), kwargs = {}) # %abs_7 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%primals_8,), kwargs = {}) # %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_7,), kwargs = {}) # %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_15, 4), kwargs = {}) # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_8, %primals_8), kwargs = {}) # %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_29,), kwargs = {}) # %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_16, 4), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_28, %mul_30), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %add_12), kwargs = {}) # %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_17, 4), kwargs = {}) # %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_18, 4), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_31, %mul_33), kwargs = {}) # %add_15 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, %add_14), kwargs = {}) triton_per_fused_abs_add_mul_sum_4 = async_compile.triton('triton_per_fused_abs_add_mul_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32', 14: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {13: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14), equal_to_1=(13,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_mul_sum_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 8, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_mul_sum_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp9 = tl.load(in_ptr1 + (r0), None) tmp18 = tl.load(in_ptr2 + (r0), None) tmp27 = tl.load(in_ptr3 + (r0), None) tmp42 = tl.load(in_ptr4 + (0)) tmp43 = tl.broadcast_to(tmp42, [XBLOCK, 1]) tmp45 = tl.load(in_ptr5 + (0)) tmp46 = tl.broadcast_to(tmp45, [XBLOCK, 1]) tmp54 = tl.load(in_ptr6 + (0)) tmp55 = tl.broadcast_to(tmp54, [XBLOCK, 1]) tmp57 = tl.load(in_ptr7 + (0)) tmp58 = tl.broadcast_to(tmp57, [XBLOCK, 1]) tmp66 = tl.load(in_ptr8 + (0)) tmp67 = tl.broadcast_to(tmp66, [XBLOCK, 1]) tmp69 = tl.load(in_ptr9 + (0)) tmp70 = tl.broadcast_to(tmp69, [XBLOCK, 1]) tmp78 = tl.load(in_ptr10 + (0)) tmp79 = tl.broadcast_to(tmp78, [XBLOCK, 1]) tmp81 = tl.load(in_ptr11 + (0)) tmp82 = tl.broadcast_to(tmp81, [XBLOCK, 1]) tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp10 = tl_math.abs(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = tmp9 * tmp9 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp19 = tl_math.abs(tmp18) tmp20 = tl.broadcast_to(tmp19, [XBLOCK, 
RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = tmp18 * tmp18 tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp28 = tl_math.abs(tmp27) tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp31 = tl.sum(tmp29, 1)[:, None] tmp32 = tmp27 * tmp27 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp36 = 4.0 tmp37 = tmp4 * tmp36 tmp38 = tmp8 * tmp36 tmp39 = tmp37 + tmp38 tmp40 = 0.0 tmp41 = tmp39 + tmp40 tmp44 = tmp43 * tmp36 tmp47 = tmp46 * tmp36 tmp48 = tmp44 + tmp47 tmp49 = tmp41 + tmp48 tmp50 = tmp22 * tmp36 tmp51 = tmp26 * tmp36 tmp52 = tmp50 + tmp51 tmp53 = tmp49 + tmp52 tmp56 = tmp55 * tmp36 tmp59 = tmp58 * tmp36 tmp60 = tmp56 + tmp59 tmp61 = tmp53 + tmp60 tmp62 = tmp31 * tmp36 tmp63 = tmp35 * tmp36 tmp64 = tmp62 + tmp63 tmp65 = tmp61 + tmp64 tmp68 = tmp67 * tmp36 tmp71 = tmp70 * tmp36 tmp72 = tmp68 + tmp71 tmp73 = tmp65 + tmp72 tmp74 = tmp13 * tmp36 tmp75 = tmp17 * tmp36 tmp76 = tmp74 + tmp75 tmp77 = tmp73 + tmp76 tmp80 = tmp79 * tmp36 tmp83 = tmp82 * tmp36 tmp84 = tmp80 + tmp83 tmp85 = tmp77 + tmp84 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp85, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [t0], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 16, grid=grid(16), stream=stream0) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [t0_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [t0_2], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_0.run(buf6, primals_7, buf7, buf8, 16, grid=grid(16), stream=stream0) buf9 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, 
beta=1, out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf5, (4, 4), (1, 4), 0), primals_10, out=buf10) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm] extern_kernels.mm(buf10, buf5, out=buf11) buf13 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [t0_3, sub, mul_1, mul_2, mul_5, L_2nd], Original ATen: [aten.leaky_relu, aten.sub, aten.mul, aten.sum] triton_per_fused_leaky_relu_mul_sub_sum_1.run(primals_3, buf9, buf13, 1, 16, grid=grid(1), stream=stream0) buf31 = empty_strided_cuda((), (), torch.float32) buf32 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [trace, L_1st, mul_30, mul_31, add_9], Original ATen: [aten.trace, aten.mul, aten.add] triton_per_fused_add_mul_trace_2.run(buf11, buf13, buf31, buf32, 1, 4, grid=grid(1), stream=stream0) del buf11 buf16 = empty_strided_cuda((), (), torch.float32) buf17 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [abs_2, sum_4, mul_10, sum_5], Original ATen: [aten.abs, aten.sum, aten.mul] triton_per_fused_abs_mul_sum_3.run(primals_2, buf16, buf17, 1, 4, grid=grid(1), stream=stream0) buf20 = empty_strided_cuda((), (), torch.float32) buf21 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [abs_4, sum_8, mul_16, sum_9], Original ATen: [aten.abs, aten.sum, aten.mul] triton_per_fused_abs_mul_sum_3.run(primals_5, buf20, buf21, 1, 4, grid=grid(1), stream=stream0) buf25 = empty_strided_cuda((), (), torch.float32) buf26 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [abs_6, sum_12, mul_22, sum_13], Original ATen: [aten.abs, aten.sum, aten.mul] triton_per_fused_abs_mul_sum_3.run(primals_7, buf25, buf26, 1, 4, grid=grid(1), stream=stream0) buf29 = empty_strided_cuda((), (), torch.float32) buf30 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [abs_8, sum_16, mul_28, sum_17], Original ATen: [aten.abs, aten.sum, aten.mul] triton_per_fused_abs_mul_sum_3.run(primals_9, buf29, buf30, 1, 4, grid=grid(1), stream=stream0) buf14 = empty_strided_cuda((), (), torch.float32) buf24 = buf14; del buf14 # reuse buf33 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [abs_1, sum_2, mul_6, mul_7, sum_3, mul_8, add, L_reg, mul_9, mul_11, add_2, L_reg_1, abs_3, sum_6, mul_12, mul_13, sum_7, mul_14, add_3, L_reg_2, mul_15, mul_17, add_4, L_reg_3, abs_5, sum_10, mul_18, mul_19, sum_11, mul_20, add_5, L_reg_4, mul_21, mul_23, add_6, L_reg_5, abs_7, sum_14, mul_24, mul_25, sum_15, mul_26, add_7, L_reg_6, mul_27, mul_29, add_8, L_reg_7], Original ATen: [aten.abs, aten.sum, aten.mul, aten.add] triton_per_fused_abs_add_mul_sum_4.run(buf33, primals_1, primals_8, primals_4, primals_6, buf16, buf17, buf20, buf21, buf25, buf26, buf29, buf30, 1, 16, grid=grid(1), stream=stream0) del buf16 del buf17 del buf20 del buf21 del buf25 del buf26 del buf29 del buf30 return (buf31, buf13, buf32, buf33, buf5, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, buf1, buf2, buf4, buf5, buf7, buf8, buf9, reinterpret_tensor(buf10, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), 
device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch as torch


class SDNE_layer(nn.Module):

    def __init__(self, num_node, hidden_size1, hidden_size2, droput,
                 alpha, beta, nu1, nu2):
        super(SDNE_layer, self).__init__()
        self.num_node = num_node
        self.hidden_size1 = hidden_size1
        self.hidden_size2 = hidden_size2
        self.droput = droput  # kept with the upstream spelling; stored but never applied
        self.alpha = alpha
        self.beta = beta
        self.nu1 = nu1
        self.nu2 = nu2
        self.encode0 = nn.Linear(self.num_node, self.hidden_size1)
        self.encode1 = nn.Linear(self.hidden_size1, self.hidden_size2)
        self.decode0 = nn.Linear(self.hidden_size2, self.hidden_size1)
        self.decode1 = nn.Linear(self.hidden_size1, self.num_node)

    def forward(self, adj_mat, l_mat):
        t0 = F.leaky_relu(self.encode0(adj_mat))
        t0 = F.leaky_relu(self.encode1(t0))
        self.embedding = t0
        t0 = F.leaky_relu(self.decode0(t0))
        t0 = F.leaky_relu(self.decode1(t0))
        # First-order proximity: 2 * tr(Y^T L Y) over the embedding Y.
        L_1st = 2 * torch.trace(torch.mm(torch.mm(torch.t(self.embedding),
            l_mat), self.embedding))
        # Second-order proximity: squared reconstruction error, reweighted by beta.
        L_2nd = torch.sum((adj_mat - t0) * adj_mat * self.beta * ((adj_mat -
            t0) * adj_mat * self.beta))
        # L1/L2 regularization over all parameters.
        L_reg = 0
        for param in self.parameters():
            L_reg += self.nu1 * torch.sum(torch.abs(param)
                ) + self.nu2 * torch.sum(param * param)
        return self.alpha * L_1st, L_2nd, self.alpha * L_1st + L_2nd, L_reg

    def get_emb(self, adj):
        t0 = self.encode0(adj)
        t0 = self.encode1(t0)
        return t0


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'num_node': 4, 'hidden_size1': 4, 'hidden_size2': 4,
        'droput': 4, 'alpha': 4, 'beta': 4, 'nu1': 4, 'nu2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch.nn as nn import torch as torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_per_fused_leaky_relu_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = 0.0 tmp3 = tmp1 > tmp2 tmp4 = 0.01 tmp5 = tmp1 * tmp4 tmp6 = tl.where(tmp3, tmp1, tmp5) tmp7 = tmp0 - tmp6 tmp8 = tmp7 * tmp0 tmp9 = 4.0 tmp10 = tmp8 * tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None) @triton.jit def triton_per_fused_add_mul_trace_2(in_ptr0, in_ptr1, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + 0) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = 4.0 tmp7 = tmp5 * tmp6 tmp10 = tmp7 + tmp9 tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None) @triton.jit def triton_per_fused_abs_mul_sum_3(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None) @triton.jit def triton_per_fused_abs_add_mul_sum_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, 
in_ptr10, in_ptr11, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr1 + r0, None) tmp18 = tl.load(in_ptr2 + r0, None) tmp27 = tl.load(in_ptr3 + r0, None) tmp42 = tl.load(in_ptr4 + 0) tmp43 = tl.broadcast_to(tmp42, [XBLOCK, 1]) tmp45 = tl.load(in_ptr5 + 0) tmp46 = tl.broadcast_to(tmp45, [XBLOCK, 1]) tmp54 = tl.load(in_ptr6 + 0) tmp55 = tl.broadcast_to(tmp54, [XBLOCK, 1]) tmp57 = tl.load(in_ptr7 + 0) tmp58 = tl.broadcast_to(tmp57, [XBLOCK, 1]) tmp66 = tl.load(in_ptr8 + 0) tmp67 = tl.broadcast_to(tmp66, [XBLOCK, 1]) tmp69 = tl.load(in_ptr9 + 0) tmp70 = tl.broadcast_to(tmp69, [XBLOCK, 1]) tmp78 = tl.load(in_ptr10 + 0) tmp79 = tl.broadcast_to(tmp78, [XBLOCK, 1]) tmp81 = tl.load(in_ptr11 + 0) tmp82 = tl.broadcast_to(tmp81, [XBLOCK, 1]) tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp10 = tl_math.abs(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = tmp9 * tmp9 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp19 = tl_math.abs(tmp18) tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = tmp18 * tmp18 tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp28 = tl_math.abs(tmp27) tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp31 = tl.sum(tmp29, 1)[:, None] tmp32 = tmp27 * tmp27 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp36 = 4.0 tmp37 = tmp4 * tmp36 tmp38 = tmp8 * tmp36 tmp39 = tmp37 + tmp38 tmp40 = 0.0 tmp41 = tmp39 + tmp40 tmp44 = tmp43 * tmp36 tmp47 = tmp46 * tmp36 tmp48 = tmp44 + tmp47 tmp49 = tmp41 + tmp48 tmp50 = tmp22 * tmp36 tmp51 = tmp26 * tmp36 tmp52 = tmp50 + tmp51 tmp53 = tmp49 + tmp52 tmp56 = tmp55 * tmp36 tmp59 = tmp58 * tmp36 tmp60 = tmp56 + tmp59 tmp61 = tmp53 + tmp60 tmp62 = tmp31 * tmp36 tmp63 = tmp35 * tmp36 tmp64 = tmp62 + tmp63 tmp65 = tmp61 + tmp64 tmp68 = tmp67 * tmp36 tmp71 = tmp70 * tmp36 tmp72 = tmp68 + tmp71 tmp73 = tmp65 + tmp72 tmp74 = tmp13 * tmp36 tmp75 = tmp17 * tmp36 tmp76 = tmp74 + tmp75 tmp77 = tmp73 + tmp76 tmp80 = tmp79 * tmp36 tmp83 = tmp82 * tmp36 tmp84 = tmp80 + tmp83 tmp85 = tmp77 + tmp84 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp85, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf2 = 
empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(16)](buf0, primals_2, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = buf0 del buf0 extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_leaky_relu_0[grid(16)](buf3, primals_5, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf3 del buf3 extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (4, 4), (1, 4 ), 0), out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_leaky_relu_0[grid(16)](buf6, primals_7, buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) buf9 = buf6 del buf6 extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (4, 4), (1, 4), 0), primals_10, out=buf10) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf10, buf5, out=buf11) buf13 = empty_strided_cuda((), (), torch.float32) triton_per_fused_leaky_relu_mul_sub_sum_1[grid(1)](primals_3, buf9, buf13, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf31 = empty_strided_cuda((), (), torch.float32) buf32 = empty_strided_cuda((), (), torch.float32) triton_per_fused_add_mul_trace_2[grid(1)](buf11, buf13, buf31, buf32, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf11 buf16 = empty_strided_cuda((), (), torch.float32) buf17 = empty_strided_cuda((), (), torch.float32) triton_per_fused_abs_mul_sum_3[grid(1)](primals_2, buf16, buf17, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf20 = empty_strided_cuda((), (), torch.float32) buf21 = empty_strided_cuda((), (), torch.float32) triton_per_fused_abs_mul_sum_3[grid(1)](primals_5, buf20, buf21, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf25 = empty_strided_cuda((), (), torch.float32) buf26 = empty_strided_cuda((), (), torch.float32) triton_per_fused_abs_mul_sum_3[grid(1)](primals_7, buf25, buf26, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf29 = empty_strided_cuda((), (), torch.float32) buf30 = empty_strided_cuda((), (), torch.float32) triton_per_fused_abs_mul_sum_3[grid(1)](primals_9, buf29, buf30, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf14 = empty_strided_cuda((), (), torch.float32) buf24 = buf14 del buf14 buf33 = buf24 del buf24 triton_per_fused_abs_add_mul_sum_4[grid(1)](buf33, primals_1, primals_8, primals_4, primals_6, buf16, buf17, buf20, buf21, buf25, buf26, buf29, buf30, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf16 del buf17 del buf20 del buf21 del buf25 del buf26 del buf29 del buf30 return (buf31, buf13, buf32, buf33, buf5, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, buf1, buf2, buf4, buf5, buf7, buf8, buf9, reinterpret_tensor(buf10, (4, 4), (1, 4), 0)) class SDNE_layerNew(nn.Module): def __init__(self, num_node, hidden_size1, hidden_size2, droput, alpha, beta, nu1, nu2): super(SDNE_layerNew, self).__init__() self.num_node = num_node self.hidden_size1 = hidden_size1 self.hidden_size2 = hidden_size2 self.droput = droput self.alpha = alpha self.beta = beta self.nu1 = nu1 self.nu2 = nu2 self.encode0 = nn.Linear(self.num_node, self.hidden_size1) self.encode1 = nn.Linear(self.hidden_size1, self.hidden_size2) self.decode0 = 
nn.Linear(self.hidden_size2, self.hidden_size1) self.decode1 = nn.Linear(self.hidden_size1, self.num_node) def get_emb(self, adj): t0 = self.encode0(adj) t0 = self.encode1(t0) return t0 def forward(self, input_0, input_1): primals_1 = self.encode0.weight primals_2 = self.encode0.bias primals_3 = self.encode1.weight primals_5 = self.encode1.bias primals_4 = self.decode0.weight primals_7 = self.decode0.bias primals_6 = self.decode1.weight primals_9 = self.decode1.bias primals_8 = input_0 primals_10 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0], output[1], output[2], output[3]
ckhui/cogdl
SDNE_layer
false
12666
[ "MIT" ]
0
93bea17c2dc7084857cd0a4af8178c174965127c
https://github.com/ckhui/cogdl/tree/93bea17c2dc7084857cd0a4af8178c174965127c
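For orientation, a minimal CPU usage sketch of the eager SDNE_layer defined in this record's python_code field (the compiled call() path above requires CUDA). Shapes follow get_inputs()/get_init_inputs(); everything else is illustrative and not part of the record:

# Sketch only: assumes the SDNE_layer class from the python_code field
# has already been executed in this session.
import torch

model = SDNE_layer(num_node=4, hidden_size1=4, hidden_size2=4, droput=4,
    alpha=4, beta=4, nu1=4, nu2=4)
adj_mat = torch.rand(4, 4)  # adjacency matrix
l_mat = torch.rand(4, 4)    # graph Laplacian
alpha_l1st, l2nd, combined, l_reg = model(adj_mat, l_mat)  # scalar losses
emb = model.get_emb(adj_mat)  # (4, 4) node embeddings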
LearnedPositionalEmbedding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/5i/c5iybmnijeaxq3pumkl5crtkns462pwdrh72bxy4lcvnlh3r4364.py # Topologically Sorted Source Nodes: [ne, mask, cumsum], Original ATen: [aten.ne, aten._to_copy, aten.cumsum] # Source node to ATen node mapping: # cumsum => cumsum # mask => convert_element_type # ne => ne # Graph fragment: # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%primals_1, 4), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int32), kwargs = {}) # %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%convert_element_type, 1), kwargs = {}) triton_per_fused__to_copy_cumsum_ne_0 = async_compile.triton('triton_per_fused__to_copy_cumsum_ne_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton_heuristics.persistent_reduction( size_hints=[64, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_cumsum_ne_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0) tmp1 = 4.0 tmp2 = tmp0 != tmp1 tmp3 = tmp2.to(tl.int32) tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.int64) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ft/cftxaavy7b7scxgnrhfsvfnicvimxnf3kpckow5nzkyed3meyoli.py # Topologically Sorted Source Nodes: [ne, mask, type_as, incremental_indices, long, positions], Original ATen: [aten.ne, aten._to_copy, aten.mul, aten.add] # Source node to ATen node mapping: # incremental_indices => mul # long => convert_element_type_2 # mask => convert_element_type # ne => ne # positions => add # type_as => convert_element_type_1 # Graph fragment: # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%primals_1, 4), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int32), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%cumsum, torch.int32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %convert_element_type), kwargs = {}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul, torch.int64), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 4), kwargs = {}) triton_poi_fused__to_copy_add_mul_ne_1 = async_compile.triton('triton_poi_fused__to_copy_add_mul_ne_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_mul_ne_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0.to(tl.int32) tmp3 = 4.0 tmp4 = tmp2 != tmp3 tmp5 = tmp4.to(tl.int32) tmp6 = tmp1 * tmp5 tmp7 = tmp6.to(tl.int64) tmp8 = tl.full([1], 4, tl.int64) tmp9 = tmp7 + tmp8 tl.store(in_out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/in/cinglqnf6mtochspmiolvr3bqay6yiivzgqlihpkdlbd5p4ccw54.py # Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding] # Source node to ATen node mapping: # embedding => embedding # Graph fragment: # %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %add, 4), kwargs = {}) triton_poi_fused_embedding_2 = async_compile.triton('triton_poi_fused_embedding_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 9, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 9)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 9") tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (9, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.int64) # Topologically Sorted Source Nodes: [ne, mask, cumsum], Original ATen: [aten.ne, aten._to_copy, aten.cumsum] stream0 = get_raw_stream(0) triton_per_fused__to_copy_cumsum_ne_0.run(primals_1, buf0, 64, 4, grid=grid(64), stream=stream0) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [ne, mask, type_as, incremental_indices, long, positions], Original ATen: [aten.ne, aten._to_copy, aten.mul, aten.add] triton_poi_fused__to_copy_add_mul_ne_1.run(buf1, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding] triton_poi_fused_embedding_2.run(buf1, primals_2, buf2, 1024, grid=grid(1024), stream=stream0) del primals_2 return (buf2, buf1, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((9, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
from torch import nn


def create_position_ids_from_input_ids(input_ids, padding_idx):
    """
    Replace non-padding symbols with their position numbers. Position numbers
    begin at padding_idx+1. Padding symbols are ignored. This is modified
    from fairseq's `utils.make_positions`.

    :param torch.Tensor input_ids:
    :return torch.Tensor:
    """
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return incremental_indices.long() + padding_idx


class LearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    Padding ids are ignored by either offsetting based on padding_idx or by
    setting padding_idx to None and ensuring that the appropriate position
    ids are passed to the forward function.
    """

    def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
                 padding_idx: 'int'):
        assert padding_idx is not None
        num_embeddings += padding_idx + 1
        super().__init__(num_embeddings, embedding_dim,
            padding_idx=padding_idx)

    def forward(self, input, use_cache=False):
        """Input is expected to be of size [bsz x seqlen]."""
        if use_cache:
            pos = int(self.padding_idx + input.size(1))
            positions = input.data.new(1, 1).fill_(pos)
        else:
            positions = create_position_ids_from_input_ids(input,
                self.padding_idx)
        return super().forward(positions), positions


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_embeddings': 4, 'embedding_dim': 4, 'padding_idx': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0) tmp1 = 4.0 tmp2 = tmp0 != tmp1 tmp3 = tmp2.to(tl.int32) tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.int64) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp7, xmask) @triton.jit def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.int32) tmp3 = 4.0 tmp4 = tmp2 != tmp3 tmp5 = tmp4.to(tl.int32) tmp6 = tmp1 * tmp5 tmp7 = tmp6.to(tl.int64) tmp8 = tl.full([1], 4, tl.int64) tmp9 = tmp7 + tmp8 tl.store(in_out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 9, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 9) | ~xmask, 'index out of bounds: 0 <= tmp4 < 9') tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (9, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) get_raw_stream(0) triton_per_fused__to_copy_cumsum_ne_0[grid(64)](primals_1, buf0, 64, 4, XBLOCK=32, num_warps=2, num_stages=1) buf1 = buf0 del buf0 triton_poi_fused__to_copy_add_mul_ne_1[grid(256)](buf1, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_embedding_2[grid(1024)](buf1, primals_2, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, buf1, buf1 def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. 
:param torch.Tensor x: :return torch.Tensor: """ mask = input_ids.ne(padding_idx).int() incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask return incremental_indices.long() + padding_idx class LearnedPositionalEmbeddingNew(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. """ def __init__(self, num_embeddings: 'int', embedding_dim: 'int', padding_idx: 'int'): assert padding_idx is not None num_embeddings += padding_idx + 1 super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx ) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
kev2513/gap-text2sql
LearnedPositionalEmbedding
false
12667
[ "Apache-2.0" ]
0
67c4d6489ac44d4785a0cc1b836c889f00226f1d
https://github.com/kev2513/gap-text2sql/tree/67c4d6489ac44d4785a0cc1b836c889f00226f1d
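As a quick check of the position-id scheme documented in this record, a small worked example (assuming create_position_ids_from_input_ids from the python_code field is in scope; the token values are illustrative):

import torch

input_ids = torch.tensor([[7, 9, 4, 8]])  # 4 is the padding id here
positions = create_position_ids_from_input_ids(input_ids, padding_idx=4)
# mask = [1, 1, 0, 1]; cumsum(mask) * mask = [1, 2, 0, 3]; + 4
# -> tensor([[5, 6, 4, 7]]): real tokens count up from padding_idx + 1,
#    while the padded slot stays at padding_idx.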
CrossEntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # cross_entropy => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/za/cza3krejiqg7tjfagjknvrkfb4hawyqjlraihwndrbdsaurpzx3y.py # Topologically Sorted Source Nodes: [cross_entropy, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.mean] # Source node to ATen node mapping: # cross_entropy => div, exp, log, mul, neg, sub_1, sum_1, sum_2 # mean => mean # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {}) triton_per_fused__log_softmax_div_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mean_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused__log_softmax_div_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tmp22 = 1.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [cross_entropy, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.mean] triton_per_fused__log_softmax_div_mean_mul_neg_sum_1.run(buf2, buf0, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.cpp_extension


class CrossEntropyLoss(torch.nn.Module):

    def __init__(self):
        super(CrossEntropyLoss, self).__init__()
        self.ce_loss = torch.nn.CrossEntropyLoss()

    def forward(self, cls_output, label, **_):
        return self.ce_loss(cls_output, label).mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.cpp_extension assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tmp22 = 1.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mean_mul_neg_sum_1[grid(1)](buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class CrossEntropyLossNew(torch.nn.Module): def __init__(self): super(CrossEntropyLossNew, self).__init__() self.ce_loss = torch.nn.CrossEntropyLoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
hugobloem/PyTorch-StudioGAN
CrossEntropyLoss
false
12668
[ "MIT" ]
0
3deab27c0774adba5a94c7f452d32d4cbc3b117c
https://github.com/hugobloem/PyTorch-StudioGAN/tree/3deab27c0774adba5a94c7f452d32d4cbc3b117c
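A minimal sketch of the eager wrapper in this record. With floating-point targets of the same shape as the input, torch.nn.CrossEntropyLoss treats dim 1 as class probabilities (soft labels, available since PyTorch 1.10), and since the default 'mean' reduction already returns a scalar, the trailing .mean() is effectively a no-op. Shapes follow get_inputs(); the rest is illustrative:

import torch

loss_fn = CrossEntropyLoss()         # the wrapper class from the python_code field
cls_output = torch.rand(4, 4, 4, 4)  # logits
label = torch.rand(4, 4, 4, 4)       # soft targets over dim 1
loss = loss_fn(cls_output, label)    # 0-dim tensor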
LSoftLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/jj/cjjzsmiaz6yk7enttuncto5y2bnsngnjksfwuet4p7shqjhbwt36.py # Topologically Sorted Source Nodes: [mul, sub, mul_1, y_true_updated, binary_cross_entropy], Original ATen: [aten.mul, aten.rsub, aten.add, aten.binary_cross_entropy] # Source node to ATen node mapping: # binary_cross_entropy => full_default, full_default_1, log, log1p, maximum, maximum_1, mul_2, mul_3, neg, sub_1, sub_2 # mul => mul # mul_1 => mul_1 # sub => sub # y_true_updated => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg2_1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg2_1,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %maximum_1), kwargs = {}) # %sub_2 : 
[num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_3), kwargs = {}) triton_poi_fused_add_binary_cross_entropy_mul_rsub_0 = async_compile.triton('triton_poi_fused_add_binary_cross_entropy_mul_rsub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_binary_cross_entropy_mul_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_binary_cross_entropy_mul_rsub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp5 = tl.load(in_ptr2 + (x0), xmask) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp8 = tmp7 - tmp3 tmp9 = -tmp5 tmp10 = libdevice.log1p(tmp9) tmp11 = -100.0 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp8 * tmp12 tmp14 = tl_math.log(tmp5) tmp15 = triton_helpers.maximum(tmp14, tmp11) tmp16 = tmp7 * tmp15 tmp17 = tmp13 - tmp16 tl.store(out_ptr0 + (x0), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sub, mul_1, y_true_updated, binary_cross_entropy], Original ATen: [aten.mul, aten.rsub, aten.add, aten.binary_cross_entropy] stream0 = get_raw_stream(0) triton_poi_fused_add_binary_cross_entropy_mul_rsub_0.run(arg0_1, arg1_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class LSoftLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, y_pred, y_true, beta):
        with torch.no_grad():
            y_true_updated = beta * y_true + (1 - beta) * y_pred
        return F.binary_cross_entropy(y_pred, y_true_updated, reduction='none')


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_binary_cross_entropy_mul_rsub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp5 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp8 = tmp7 - tmp3 tmp9 = -tmp5 tmp10 = libdevice.log1p(tmp9) tmp11 = -100.0 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp8 * tmp12 tmp14 = tl_math.log(tmp5) tmp15 = triton_helpers.maximum(tmp14, tmp11) tmp16 = tmp7 * tmp15 tmp17 = tmp13 - tmp16 tl.store(out_ptr0 + x0, tmp17, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_binary_cross_entropy_mul_rsub_0[grid(256)](arg0_1, arg1_1, arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, class LSoftLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
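The fused kernel above folds the soft-target construction and the elementwise binary cross-entropy, including PyTorch's clamp of both logarithms at -100, into a single pass over the 256 elements. Below is a minimal eager re-derivation of that arithmetic as a CPU sanity check; the shapes mirror get_inputs(), and the seed and tolerance are illustrative choices, not part of the original code:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
y_pred = torch.rand(4, 4, 4, 4)
y_true = torch.rand(4, 4, 4, 4)
beta = torch.rand(4, 4, 4, 4)

# Soft target, exactly as LSoftLoss.forward builds it.
target = beta * y_true + (1 - beta) * y_pred
ref = F.binary_cross_entropy(y_pred, target, reduction='none')

# The per-element arithmetic of the Triton kernel (tmp7 through tmp17),
# with the same -100 clamp applied to both logarithms.
manual = -(target * torch.log(y_pred).clamp(min=-100) +
           (1 - target) * torch.log1p(-y_pred).clamp(min=-100))
print(torch.allclose(ref, manual, atol=1e-6))  # expected: True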
khodwe56/kaggle-birdsong-recognition
LSoftLoss
false
12,669
[ "MIT" ]
0
95a902c37355619cf02558968f000038e487db47
https://github.com/khodwe56/kaggle-birdsong-recognition/tree/95a902c37355619cf02558968f000038e487db47
RNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat] # Source node to ATen node mapping: # combined => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 return (buf2, buf1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class RNN(nn.Module):

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)

    def forward(self, input, hidden):
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(combined)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, self.hidden_size)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 return buf2, buf1, buf0 class RNNNew(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(RNNNew, self).__init__() self.hidden_size = hidden_size self.i2h = nn.Linear(input_size + hidden_size, hidden_size) self.i2o = nn.Linear(input_size + hidden_size, output_size) def initHidden(self): return torch.zeros(1, self.hidden_size) def forward(self, input_0, input_1): primals_3 = self.i2h.weight primals_4 = self.i2h.bias primals_5 = self.i2o.weight primals_6 = self.i2o.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
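Only the torch.cat is lowered to a Triton kernel here; both linear layers remain extern_kernels.addmm calls into cuBLAS. A short driver for the eager module follows, as a CPU sketch with batch size 1 so that initHidden()'s (1, hidden_size) state lines up with the input; the sizes are illustrative:

import torch

rnn = RNN(input_size=4, hidden_size=4, output_size=4)  # class defined above
x = torch.rand(1, 4)        # one timestep, batch of 1
hidden = rnn.initHidden()   # zeros of shape (1, 4)
output, hidden = rnn(x, hidden)
print(output.shape, hidden.shape)  # torch.Size([1, 4]) torch.Size([1, 4])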
khalilbalaree/Key-Smasher
RNN
false
12,670
[ "Apache-2.0" ]
0
981bb1fd9b91e9a693dba8b1cd4ee7ea82409d14
https://github.com/khalilbalaree/Key-Smasher/tree/981bb1fd9b91e9a693dba8b1cd4ee7ea82409d14
CDEFunc
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py # Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # z_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6t/c6teacbdnrlxbeecxkpwnmehhrwep7knmvoy2pekproaa5jueeax.py # Topologically Sorted Source Nodes: [z_3], Original ATen: [aten.tanh, aten.tanh_backward] # Source node to ATen node mapping: # z_3 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul), kwargs = {}) triton_poi_fused_tanh_tanh_backward_1 = async_compile.triton('triton_poi_fused_tanh_tanh_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_tanh_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_tanh_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + (x2), tmp3, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 128), (128, 1)) assert_size_stride(primals_5, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) 
# Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 16), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [z_3], Original ATen: [aten.tanh, aten.tanh_backward] triton_poi_fused_tanh_tanh_backward_1.run(buf3, primals_5, buf4, 1024, grid=grid(1024), stream=stream0) del primals_5 return (reinterpret_tensor(buf3, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf4, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class CDEFunc(torch.nn.Module):

    def __init__(self, input_channels, hidden_channels):
        super(CDEFunc, self).__init__()
        self.input_channels = input_channels
        self.hidden_channels = hidden_channels
        self.linear1 = torch.nn.Linear(hidden_channels, 128)
        self.linear2 = torch.nn.Linear(128, input_channels * hidden_channels)

    def forward(self, z):
        z = self.linear1(z)
        z = z.relu()
        z = self.linear2(z)
        z = z.tanh()
        z = z.view(*z.shape[:-1], self.hidden_channels, self.input_channels)
        return z


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channels': 4, 'hidden_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_tanh_tanh_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 128), (128, 1)) assert_size_stride(primals_5, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 16), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. 
float32) triton_poi_fused_tanh_tanh_backward_1[grid(1024)](buf3, primals_5, buf4, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), buf4, primals_4, buf5 class CDEFuncNew(torch.nn.Module): def __init__(self, input_channels, hidden_channels): super(CDEFuncNew, self).__init__() self.input_channels = input_channels self.hidden_channels = hidden_channels self.linear1 = torch.nn.Linear(hidden_channels, 128) self.linear2 = torch.nn.Linear(128, input_channels * hidden_channels) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
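The two matmuls go through cuBLAS while the bias-plus-ReLU and bias-plus-tanh epilogues are fused into the two Triton kernels; the final view merely reinterprets the 16-wide output as a trailing (hidden_channels, input_channels) matrix, the shape a neural CDE vector field is expected to produce. A usage sketch for the eager module (CPU, shapes from get_inputs(); the variable names are ours):

import torch

func = CDEFunc(input_channels=4, hidden_channels=4)  # class defined above
z = torch.rand(4, 4, 4, 4)
out = func(z)
print(out.shape)  # torch.Size([4, 4, 4, 4, 4]): one 4x4 matrix per point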
khaledsaab/NeuralCDE
CDEFunc
false
12,671
[ "Apache-2.0" ]
0
559d9d6fdb137afd14965725ea4845cf31e9235c
https://github.com/khaledsaab/NeuralCDE/tree/559d9d6fdb137afd14965725ea4845cf31e9235c
NegativeSampling
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/5x/c5x3rnyccugyytvfisd523ro34i2ddgorx7wbrbsps3dyo4sl2ve.py # Topologically Sorted Source Nodes: [log_sigmoid, log_sigmoid_1, neg, sum_1, truediv, add, sum_2, neg_1, truediv_1], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.sum, aten.div, aten.add] # Source node to ATen node mapping: # add => add # log_sigmoid => abs_1, exp, full_default, log1p, minimum, neg, sub # log_sigmoid_1 => abs_2, exp_1, full_default_1, log1p_1, minimum_1, neg_2, sub_1 # neg => neg_1 # neg_1 => neg_3 # sum_1 => sum_1 # sum_2 => sum_2 # truediv => div # truediv_1 => div_1 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %select), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %neg_1 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%slice_3,), kwargs = {}) # %minimum_1 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %neg_1), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%neg_1,), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_2,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_2,), kwargs = {}) # %log1p_1 : [num_users=1] = 
call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_1, %log1p_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_1, [1]), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %div), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {}) # %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg_3, 4), kwargs = {}) triton_per_fused_add_div_log_sigmoid_forward_neg_sum_0 = async_compile.triton('triton_per_fused_add_div_log_sigmoid_forward_neg_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_sigmoid_forward_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_sigmoid_forward_neg_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp16 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp25 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp1 = 0.0 tmp2 = triton_helpers.minimum(tmp1, tmp0) tmp3 = tl_math.abs(tmp0) tmp4 = -tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = libdevice.log1p(tmp5) tmp7 = tmp2 - tmp6 tmp9 = -tmp8 tmp10 = triton_helpers.minimum(tmp1, tmp9) tmp11 = tl_math.abs(tmp9) tmp12 = -tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = libdevice.log1p(tmp13) tmp15 = tmp10 - tmp14 tmp17 = -tmp16 tmp18 = triton_helpers.minimum(tmp1, tmp17) tmp19 = tl_math.abs(tmp17) tmp20 
= -tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = libdevice.log1p(tmp21) tmp23 = tmp18 - tmp22 tmp24 = tmp15 + tmp23 tmp26 = -tmp25 tmp27 = triton_helpers.minimum(tmp1, tmp26) tmp28 = tl_math.abs(tmp26) tmp29 = -tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = libdevice.log1p(tmp30) tmp32 = tmp27 - tmp31 tmp33 = tmp24 + tmp32 tmp34 = 0.3333333333333333 tmp35 = tmp33 * tmp34 tmp36 = tmp7 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = -tmp39 tmp41 = 0.25 tmp42 = tmp40 * tmp41 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp42, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [log_sigmoid, log_sigmoid_1, neg, sum_1, truediv, add, sum_2, neg_1, truediv_1], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.sum, aten.div, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_div_log_sigmoid_forward_neg_sum_0.run(buf2, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class NegativeSampling(nn.Module):
    """Negative sampling loss as proposed by T. Mikolov et al. in Distributed
    Representations of Words and Phrases and their Compositionality.
    """

    def __init__(self):
        super(NegativeSampling, self).__init__()
        self._log_sigmoid = nn.LogSigmoid()

    def forward(self, scores):
        """Computes the value of the loss function.

        Parameters
        ----------
        scores: autograd.Variable of size (batch_size, num_noise_words + 1)
            Sparse unnormalized log probabilities. The first element in each
            row is the ground truth score (i.e. the target), other elements
            are scores of samples from the noise distribution.
        """
        k = scores.size()[1] - 1
        return -torch.sum(self._log_sigmoid(scores[:, 0]) +
            torch.sum(self._log_sigmoid(-scores[:, 1:]), dim=1) / k
            ) / scores.size()[0]


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_log_sigmoid_forward_neg_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp16 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp25 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp1 = 0.0 tmp2 = triton_helpers.minimum(tmp1, tmp0) tmp3 = tl_math.abs(tmp0) tmp4 = -tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = libdevice.log1p(tmp5) tmp7 = tmp2 - tmp6 tmp9 = -tmp8 tmp10 = triton_helpers.minimum(tmp1, tmp9) tmp11 = tl_math.abs(tmp9) tmp12 = -tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = libdevice.log1p(tmp13) tmp15 = tmp10 - tmp14 tmp17 = -tmp16 tmp18 = triton_helpers.minimum(tmp1, tmp17) tmp19 = tl_math.abs(tmp17) tmp20 = -tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = libdevice.log1p(tmp21) tmp23 = tmp18 - tmp22 tmp24 = tmp15 + tmp23 tmp26 = -tmp25 tmp27 = triton_helpers.minimum(tmp1, tmp26) tmp28 = tl_math.abs(tmp26) tmp29 = -tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = libdevice.log1p(tmp30) tmp32 = tmp27 - tmp31 tmp33 = tmp24 + tmp32 tmp34 = 0.3333333333333333 tmp35 = tmp33 * tmp34 tmp36 = tmp7 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = -tmp39 tmp41 = 0.25 tmp42 = tmp40 * tmp41 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp42, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_div_log_sigmoid_forward_neg_sum_0[grid(1)](buf2, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class NegativeSamplingNew(nn.Module): """Negative sampling loss as proposed by T. Mikolov et al. in Distributed Representations of Words and Phrases and their Compositionality. """ def __init__(self): super(NegativeSamplingNew, self).__init__() self._log_sigmoid = nn.LogSigmoid() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
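With the (4, 4, 4, 4) benchmark input, scores[:, 0] is the (4, 4, 4) target slice and scores[:, 1:] holds the three noise slices, which is why the kernel hard-codes 0.3333... (1/k) and 0.25 (1/batch). In the 2-D layout the docstring describes, the loss can be checked against a direct transcription of the formula; the sizes below are illustrative:

import torch
import torch.nn.functional as F

loss_fn = NegativeSampling()  # class defined above
scores = torch.randn(8, 6)    # column 0: target score, columns 1..5: noise
loss = loss_fn(scores)

k = scores.size(1) - 1
manual = -torch.sum(F.logsigmoid(scores[:, 0]) +
                    F.logsigmoid(-scores[:, 1:]).sum(dim=1) / k
                    ) / scores.size(0)
print(torch.allclose(loss, manual))  # expected: True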
kimoyerr/my-dataloader
NegativeSampling
false
12,672
[ "MIT" ]
0
a235e2f02d936df3f835b423dd015afa52e54066
https://github.com/kimoyerr/my-dataloader/tree/a235e2f02d936df3f835b423dd015afa52e54066
SpatialAttention2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/jx/cjxwinyp4pgvwiomfmbv6hp6mb5npqkunztxjcjz5zmgstva2cwe.py # Topologically Sorted Source Nodes: [z_1, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # z_1 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) 
tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [z_1, mul], Original ATen: [aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(primals_2, buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, primals_1, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class SpatialAttention2d(nn.Module):

    def __init__(self, channel):
        super(SpatialAttention2d, self).__init__()
        self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        z = self.squeeze(x)
        z = self.sigmoid(z)
        return x * z


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](primals_2, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf0 class SpatialAttention2dNew(nn.Module): def __init__(self, channel): super(SpatialAttention2dNew, self).__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.squeeze.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
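The 1x1 convolution stays an extern convolution call; only the sigmoid gate and the broadcast multiply are fused into the Triton kernel. The module computes one gate value per spatial position and scales every channel by it, which the sketch below makes explicit (eager module on CPU; sizes are illustrative):

import torch

attn = SpatialAttention2d(channel=4)  # class defined above
x = torch.rand(2, 4, 8, 8)
y = attn(x)

# Equivalent two-step form: a (N, 1, H, W) gate broadcast over channels.
gate = torch.sigmoid(attn.squeeze(x))
print(torch.allclose(y, x * gate))  # expected: True
print(y.shape)                      # torch.Size([2, 4, 8, 8])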
khodwe56/kaggle-birdsong-recognition
SpatialAttention2d
false
12,673
[ "MIT" ]
0
95a902c37355619cf02558968f000038e487db47
https://github.com/khodwe56/kaggle-birdsong-recognition/tree/95a902c37355619cf02558968f000038e487db47
AnswerModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3l/c3lo77c7wjxasxrhtr6wesb72ods2d2rxnxhbfieun7j2wukm3wn.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 128, grid=grid(128), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 return (reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 8), (8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.init as init


class AnswerModule(nn.Module):

    def __init__(self, vocab_size, hidden_size):
        super(AnswerModule, self).__init__()
        self.z = nn.Linear(2 * hidden_size, vocab_size)
        init.xavier_normal_(self.z.state_dict()['weight'])
        self.dropout = nn.Dropout(0.1)

    def forward(self, M, questions):
        M = self.dropout(M)
        concat = torch.cat([M, questions], dim=2).squeeze(1)
        z = self.z(concat)
        return z


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'vocab_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 return reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 8), (8, 1), 0) class AnswerModuleNew(nn.Module): def __init__(self, vocab_size, hidden_size): super(AnswerModuleNew, self).__init__() self.z = nn.Linear(2 * hidden_size, vocab_size) init.xavier_normal_(self.z.state_dict()['weight']) self.dropout = nn.Dropout(0.1) def forward(self, input_0, input_1): primals_3 = self.z.weight primals_4 = self.z.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
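No dropout op appears in call(), so the compiled graph seems to correspond to the module with dropout inactive; for a like-for-like eager run, the sketch below therefore switches to eval() mode. Everything here is illustrative (CPU, shapes from get_inputs()):

import torch

answer = AnswerModule(vocab_size=4, hidden_size=4)  # class defined above
answer.eval()  # make nn.Dropout a no-op, matching the compiled graph
M = torch.rand(4, 4, 4)
questions = torch.rand(4, 4, 4)
z = answer(M, questions)
print(z.shape)  # torch.Size([4, 4, 4]): vocabulary logits per position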
kirubarajan/Dynamic-Memory-Network-Plus
AnswerModule
false
12,674
[ "Apache-2.0" ]
0
0613287ef5a959c7b260afcea2c31afcfb0ea189
https://github.com/kirubarajan/Dynamic-Memory-Network-Plus/tree/0613287ef5a959c7b260afcea2c31afcfb0ea189
BinaryClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 
4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/2u/c2ugr235lp7hjoeji4mzlplxg2zvzygy2xvsjv2bvmzp6eggn7yk.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_2 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ki/ckinq4jozjo44rxyehsa2lhip3xyubjyvbx6rmx44eqvtqzfiasz.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # x_4 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2, ), (1, )) assert_size_stride(primals_6, (4, 2), (2, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf2 # reuse buf6 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 128, grid=grid(128), stream=stream0) del primals_5 buf4 = 
empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 2), (2, 1), 0), reinterpret_tensor(primals_6, (2, 4), (1, 2), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 2), (2, 1), 0), buf5, primals_6, buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn import torch.utils.data class BinaryClassifier(nn.Module): """ Define a neural network that performs binary classification. The network should accept the number of features as input, and produce a single sigmoid value that can be rounded to a label: 0 or 1, as output. Notes on training: To train a binary classifier in PyTorch, use BCELoss. BCELoss is binary cross entropy loss, documentation: https://pytorch.org/docs/stable/nn.html#torch.nn.BCELoss """ def __init__(self, input_features, hidden_dim, output_dim): """ Initialize the model by setting up the linear layers; the input parameters define the layers of the model. :param input_features: the number of input features in the training/test data :param hidden_dim: the number of nodes in the hidden layer(s) :param output_dim: the number of outputs to produce """ super(BinaryClassifier, self).__init__() self.fc1 = nn.Linear(input_features, hidden_dim) self.fc2 = nn.Linear(hidden_dim, int(hidden_dim // 2)) self.fc3 = nn.Linear(int(hidden_dim // 2), output_dim) self.drop = nn.Dropout(0.25) self.sig = nn.Sigmoid() def forward(self, x): """ Perform the forward pass of the model on the input features, x. :param x: a batch of input features of size (batch_size, input_features) :return: a single, sigmoid-activated value as the output """ x = F.relu(self.fc1(x)) x = self.drop(x) x = F.relu(self.fc2(x)) x = self.drop(x) x = self.sig(self.fc3(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_features': 4, 'hidden_dim': 4, 'output_dim': 4}]
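The training note above points at BCELoss; the sketch below shows one optimization step under that recipe. It is a minimal illustration only, assuming the BinaryClassifier class defined above is in scope and using hypothetical layer sizes and random data.

import torch
import torch.nn as nn

# Hypothetical sizes; any positive dimensions work with the class above.
model = BinaryClassifier(input_features=4, hidden_dim=8, output_dim=1)
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

x = torch.rand(16, 4)                      # a batch of 16 feature vectors
y = torch.randint(0, 2, (16, 1)).float()   # BCELoss expects float 0/1 targets

model.train()
optimizer.zero_grad()
loss = criterion(model(x), y)              # model(x) is already sigmoid-activated
loss.backward()
optimizer.step()

# At inference time, round the sigmoid output to a 0/1 label.
model.eval()
with torch.no_grad():
    labels = model(x).round()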
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (4, 2), (2, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(128)](buf3, primals_5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) del 
primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 2), (2, 1), 0), reinterpret_tensor(primals_6, (2, 4), (1, 2), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_sigmoid_2[grid(256)](buf5, primals_7, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 2), (2, 1), 0), buf5, primals_6, buf6, primals_4, buf7 class BinaryClassifierNew(nn.Module): """ Define a neural network that performs binary classification. The network should accept the number of features as input, and produce a single sigmoid value that can be rounded to a label: 0 or 1, as output. Notes on training: To train a binary classifier in PyTorch, use BCELoss. BCELoss is binary cross entropy loss, documentation: https://pytorch.org/docs/stable/nn.html#torch.nn.BCELoss """ def __init__(self, input_features, hidden_dim, output_dim): """ Initialize the model by setting up the linear layers; the input parameters define the layers of the model. :param input_features: the number of input features in the training/test data :param hidden_dim: the number of nodes in the hidden layer(s) :param output_dim: the number of outputs to produce """ super(BinaryClassifierNew, self).__init__() self.fc1 = nn.Linear(input_features, hidden_dim) self.fc2 = nn.Linear(hidden_dim, int(hidden_dim // 2)) self.fc3 = nn.Linear(int(hidden_dim // 2), output_dim) self.drop = nn.Dropout(0.25) self.sig = nn.Sigmoid() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
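Note that the traced graph above contains only mm, relu, and sigmoid kernels and no dropout, so the generated wrapper corresponds to the module with dropout disabled. A quick way to sanity-check such a wrapper is to compare it against the eager module in eval mode on shared weights; a sketch, assuming both classes above are in scope and a CUDA device is available:

import torch

torch.manual_seed(0)
eager = BinaryClassifier(4, 4, 4).cuda().eval()       # eval: dropout is a no-op
compiled = BinaryClassifierNew(4, 4, 4).cuda().eval()
compiled.load_state_dict(eager.state_dict())          # identical parameter names

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    ref = eager(x)
out = compiled(x)

# The fused Triton kernels should match eager execution up to float tolerance.
torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-5)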
khadija267/Plagiarism-Detection
BinaryClassifier
false
12,675
[ "MIT" ]
0
90334167a8e6406e3f1ee178e616d6aa0094b1b5
https://github.com/khadija267/Plagiarism-Detection/tree/90334167a8e6406e3f1ee178e616d6aa0094b1b5
SCse
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [z_2], Original ATen: [aten.mean] # Source node to ATen node mapping: # z_2 => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_2, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = 
tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ad/cadccuyhl7stcp3nyqfgohiwbiv5ckfzxsye27ithwsill6dvmh4.py # Topologically Sorted Source Nodes: [conv2d_1, z_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # z_3 => relu # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_5, %primals_6, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ja/cjafhv3pz4ruylypa4fzgtmsu3ji2wfc7gvhxz7bbv527bxiebxd.py # Topologically Sorted Source Nodes: [z_1, mul, z_4, mul_1, add], Original ATen: [aten.sigmoid, aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # mul_1 => mul_1 # z_1 => sigmoid # z_4 => sigmoid_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {}) # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) x4 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp6 = tmp0 * tmp5 tmp7 = tmp3 + tmp6 tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [z_2], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf2, primals_2, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 1, 1), (1, 1, 1, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [conv2d_1, z_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf4, primals_4, 4, grid=grid(4), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf6, primals_6, 16, grid=grid(16), stream=stream0) del primals_6 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 
1), torch.float32) # Topologically Sorted Source Nodes: [z_1, mul, z_4, mul_1, add], Original ATen: [aten.sigmoid, aten.mul, aten.add] triton_poi_fused_add_mul_sigmoid_3.run(primals_2, buf0, buf6, buf7, 256, grid=grid(256), stream=stream0) return (buf7, primals_1, primals_2, primals_3, primals_5, buf0, buf2, buf4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SpatialAttention2d(nn.Module): def __init__(self, channel): super(SpatialAttention2d, self).__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.squeeze(x) z = self.sigmoid(z) return x * z class GAB(nn.Module): def __init__(self, input_dim, reduction=4): super(GAB, self).__init__() self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(input_dim, input_dim // reduction, kernel_size=1, stride=1) self.conv2 = nn.Conv2d(input_dim // reduction, input_dim, kernel_size=1, stride=1) self.relu = nn.ReLU(inplace=True) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.global_avgpool(x) z = self.relu(self.conv1(z)) z = self.sigmoid(self.conv2(z)) return x * z class SCse(nn.Module): def __init__(self, dim): super(SCse, self).__init__() self.satt = SpatialAttention2d(dim) self.catt = GAB(dim) def forward(self, x): return self.satt(x) + self.catt(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
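SCse is the concurrent spatial-and-channel squeeze-excitation pattern: the spatial branch gates every pixel, the channel branch gates every channel, and the two gated copies of the input are summed. A small shape check, assuming the classes above and hypothetical sizes:

import torch

block = SCse(dim=8)                 # hypothetical channel count
x = torch.rand(2, 8, 16, 16)

y = block(x)
assert y.shape == x.shape           # both branches broadcast back to the input shape

# The spatial branch yields one gate per pixel, the channel branch one per channel.
spatial_gate = block.satt.sigmoid(block.satt.squeeze(x))
z = block.catt.global_avgpool(x)
z = block.catt.relu(block.catt.conv1(z))
channel_gate = block.catt.sigmoid(block.catt.conv2(z))
print(spatial_gate.shape)           # torch.Size([2, 1, 16, 16])
print(channel_gate.shape)           # torch.Size([2, 8, 1, 1])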
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x4 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp6 = tmp0 * tmp5 tmp7 = tmp3 + tmp6 tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf1 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf2, primals_2, 16, 16, XBLOCK=8, 
num_warps=2, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 1, 1), (1, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_1[grid(4)](buf4, primals_4, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_4 buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_2[grid(16)](buf6, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_3[grid(256)](primals_2, buf0, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) return (buf7, primals_1, primals_2, primals_3, primals_5, buf0, buf2, buf4, buf6) class SpatialAttention2d(nn.Module): def __init__(self, channel): super(SpatialAttention2d, self).__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.squeeze(x) z = self.sigmoid(z) return x * z class GAB(nn.Module): def __init__(self, input_dim, reduction=4): super(GAB, self).__init__() self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(input_dim, input_dim // reduction, kernel_size=1, stride=1) self.conv2 = nn.Conv2d(input_dim // reduction, input_dim, kernel_size=1, stride=1) self.relu = nn.ReLU(inplace=True) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.global_avgpool(x) z = self.relu(self.conv1(z)) z = self.sigmoid(self.conv2(z)) return x * z class SCseNew(nn.Module): def __init__(self, dim): super(SCseNew, self).__init__() self.satt = SpatialAttention2d(dim) self.catt = GAB(dim) def forward(self, input_0): primals_1 = self.satt.squeeze.weight primals_3 = self.catt.conv1.weight primals_4 = self.catt.conv1.bias primals_5 = self.catt.conv2.weight primals_6 = self.catt.conv2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
khodwe56/kaggle-birdsong-recognition
SCse
false
12,676
[ "MIT" ]
0
95a902c37355619cf02558968f000038e487db47
https://github.com/khodwe56/kaggle-birdsong-recognition/tree/95a902c37355619cf02558968f000038e487db47
NN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 
4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_1 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = 
reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf7, 256, grid=grid(256), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), primals_8, buf7, primals_6, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class NN(nn.Module): def __init__(self, input_size, h1, h2, h3, num_output): super(NN, self).__init__() self.fc1 = nn.Linear(input_size, h1) self.fc2 = nn.Linear(h1, h2) self.fc3 = nn.Linear(h2, h3) self.fc4 = nn.Linear(h3, num_output) def forward(self, x): x = torch.relu(self.fc1(x)) x = torch.tanh(self.fc2(x)) x = torch.relu(self.fc3(x)) x = self.fc4(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'h1': 4, 'h2': 4, 'h3': 4, 'num_output': 4}]
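Since nn.Linear only constrains the last dimension, this MLP accepts any input whose final axis equals input_size, which is why the (4, 4, 4, 4) sample from get_inputs above works. A minimal sketch with hypothetical widths:

import torch

net = NN(input_size=4, h1=8, h2=8, h3=4, num_output=2)
x = torch.rand(10, 4)
print(net(x).shape)        # torch.Size([10, 2]); no final activation, raw scores

# Higher-rank inputs are fine too: only the last dimension must match input_size.
out4d = net(torch.rand(4, 4, 4, 4))
print(out4d.shape)         # torch.Size([4, 4, 4, 2])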
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_tanh_1[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5, primals_7, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), 
torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), buf3, reinterpret_tensor(buf5, (64, 4), (4, 1), 0 ), primals_8, buf7, primals_6, primals_4, buf8 class NNNew(nn.Module): def __init__(self, input_size, h1, h2, h3, num_output): super(NNNew, self).__init__() self.fc1 = nn.Linear(input_size, h1) self.fc2 = nn.Linear(h1, h2) self.fc3 = nn.Linear(h2, h3) self.fc4 = nn.Linear(h3, num_output) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
kgarg8/hypertune
NN
false
12,677
[ "MIT" ]
0
fbc4b87c9aefcd8449f6068232d7105975ff9dc9
https://github.com/kgarg8/hypertune/tree/fbc4b87c9aefcd8449f6068232d7105975ff9dc9
Clamp
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/pf/cpfgpnkmdrryehfgbovwrc76j5c4ffdgr5wzb56wfrblevosoooy.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clamp]
# Source node to ATen node mapping:
#   x => clamp_max, clamp_min
# Graph fragment:
#   %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {})
#   %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 30), kwargs = {})
triton_poi_fused_clamp_0 = async_compile.triton('triton_poi_fused_clamp_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 30.0
    tmp4 = triton_helpers.minimum(tmp2, tmp3)
    tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clamp]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clamp_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
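A minimal smoke test of the wrapper above might look like the following; this is a sketch only, assuming a CUDA device and a PyTorch/Triton build compatible with the cached kernel, and it uses only names defined in the generated module (`call`).

import torch

# Run the compiled graph once and compare against the eager clamp it was traced from.
x = torch.rand(4, 4, 4, 4, device='cuda')
out, = call([x])  # note: call() empties the argument list it is given
assert torch.equal(out, x.clamp(min=0, max=30))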
import torch
from torch import nn


class Clamp(nn.Module):
    """Clamp energy output"""

    def forward(self, x):
        x = torch.clamp(x, min=0, max=30)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 30.0
    tmp4 = triton_helpers.minimum(tmp2, tmp3)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class ClampNew(nn.Module):
    """Clamp energy output"""

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
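A hedged parity check between the eager `Clamp` and the compiled `ClampNew`, assuming both class definitions above are available in one CUDA-enabled session:

import torch

# Sketch only: Clamp comes from the source listing, ClampNew from the
# generated listing; both should agree elementwise on CUDA inputs.
x = torch.rand([4, 4, 4, 4], device='cuda')
assert torch.equal(Clamp()(x), ClampNew()(x))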
kmiec96/mlhep-2021-baseline-track_1
Clamp
false
12678
[ "Apache-2.0" ]
0
6fd2aa1529734204c522c49dba40fdc4b2bce353
https://github.com/kmiec96/mlhep-2021-baseline-track_1/tree/6fd2aa1529734204c522c49dba40fdc4b2bce353
NeuralNetwork
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   hidden => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/cp/ccp5m5apf7ka2skqyfxhf2df54c52qocprpycry7jrzoptyjvbti.py
# Topologically Sorted Source Nodes: [action, mul], Original ATen: [aten.tanh, aten.mul]
# Source node to ATen node mapping:
#   action => tanh
#   mul => mul
# Graph fragment:
#   %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 4), kwargs = {})
triton_poi_fused_mul_tanh_1 = async_compile.triton('triton_poi_fused_mul_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = libdevice.tanh(tmp0)
    tmp2 = 4.0
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf7, 256, grid=grid(256), stream=stream0)
        del primals_3
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2  # reuse
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [hidden_1], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 256, grid=grid(256), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [action, mul], Original ATen: [aten.tanh, aten.mul]
        triton_poi_fused_mul_tanh_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
    return (buf5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4, primals_6, buf6, primals_4, buf7, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
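A note on the kernel pair above: `triton_poi_fused_relu_threshold_backward_0` adds the bias, applies ReLU in place, and also stores the boolean mask `relu(x) <= 0` that the autograd graph (`threshold_backward`) later uses to zero gradients. A minimal sketch of that invariant in plain PyTorch (my illustration, not part of the generated code):

import torch

# Positions where the ReLU output is non-positive are exactly the positions
# whose incoming gradient is dropped in the backward pass.
x = torch.randn(16, requires_grad=True)
y = torch.relu(x)
mask = y <= 0                    # what the fused kernel writes to buf6/buf7
y.backward(torch.ones_like(y))
assert torch.equal(x.grad == 0, mask)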
import torch
import torch.nn as nn


class NeuralNetwork(nn.Module):

    def __init__(self, input_dim, hidden_dim, output_dim, action_bound):
        super(NeuralNetwork, self).__init__()
        self.input_layer = nn.Linear(input_dim, hidden_dim)
        self.hidden_layer = nn.Linear(hidden_dim, hidden_dim)
        self.output_layer = nn.Linear(hidden_dim, output_dim)
        self.action_bound = action_bound

    def forward(self, inp):
        inp = torch.tensor(inp, dtype=torch.float)
        hidden = torch.relu(self.input_layer(inp))
        hidden = torch.relu(self.hidden_layer(hidden))
        action = torch.tanh(self.output_layer(hidden))
        return self.action_bound * action


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'hidden_dim': 4, 'output_dim': 4, 'action_bound': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = libdevice.tanh(tmp0)
    tmp2 = 4.0
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_3, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
            primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return (buf5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
        buf4, primals_6, buf6, primals_4, buf7)


class NeuralNetworkNew(nn.Module):

    def __init__(self, input_dim, hidden_dim, output_dim, action_bound):
        super(NeuralNetworkNew, self).__init__()
        self.input_layer = nn.Linear(input_dim, hidden_dim)
        self.hidden_layer = nn.Linear(hidden_dim, hidden_dim)
        self.output_layer = nn.Linear(hidden_dim, output_dim)
        self.action_bound = action_bound

    def forward(self, input_0):
        primals_2 = self.input_layer.weight
        primals_3 = self.input_layer.bias
        primals_4 = self.hidden_layer.weight
        primals_5 = self.hidden_layer.bias
        primals_6 = self.output_layer.weight
        primals_7 = self.output_layer.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
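A hedged parity check between the eager module and the compiled wrapper. This is a sketch: it assumes a CUDA session where both classes above are defined, and it relies on the generated kernel hard-coding the scale `4.0`, so it only holds for `action_bound=4`.

import torch

torch.manual_seed(0)
eager = NeuralNetwork(4, 4, 4, action_bound=4).cuda()
compiled = NeuralNetworkNew(4, 4, 4, action_bound=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the same weights
x = torch.rand([4, 4, 4, 4], device='cuda')
# The eager forward re-wraps its input with torch.tensor (warns, but preserves device).
assert torch.allclose(eager(x), compiled(x), atol=1e-6)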
keyshor/homework
NeuralNetwork
false
12679
[ "MIT" ]
0
687f9edf73bbac8fc492dfd82d634c19a38f5aab
https://github.com/keyshor/homework/tree/687f9edf73bbac8fc492dfd82d634c19a38f5aab
UpSample
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/uy/cuykwspdedcweei632sohoszydcdycwgfpbqfmts2rz3pbme75y3.py
# Topologically Sorted Source Nodes: [up_x], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
#   up_x => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min, clamp_min_2, clamp_min_3, convert_element_type, convert_element_type_1, convert_element_type_3, iota, mul, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4
# Graph fragment:
#   %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 1.0), kwargs = {})
#   %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {})
#   %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
#   %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {})
#   %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
#   %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
#   %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
#   %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
#   %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
#   %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
#   %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
#   %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
#   %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
#   %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
#   %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4) % 4
    x0 = xindex % 4
    x2 = (xindex // 16)
    x6 = xindex
    x4 = (xindex // 48)
    x7 = xindex % 48
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 1.0
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 3, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tmp11 = x0
    tmp12 = tmp11.to(tl.float32)
    tmp13 = tmp12 * tmp2
    tmp14 = triton_helpers.maximum(tmp13, tmp4)
    tmp15 = tmp14.to(tl.int32)
    tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
    tmp17 = tmp15 + tmp7
    tmp18 = triton_helpers.minimum(tmp17, tmp9)
    tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
    tmp20 = tmp19 - tmp16
    tmp21 = tmp15.to(tl.float32)
    tmp22 = tmp14 - tmp21
    tmp23 = triton_helpers.maximum(tmp22, tmp4)
    tmp24 = triton_helpers.minimum(tmp23, tmp2)
    tmp25 = tmp20 * tmp24
    tmp26 = tmp16 + tmp25
    tmp27 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
    tmp29 = tmp28 - tmp27
    tmp30 = tmp29 * tmp24
    tmp31 = tmp27 + tmp30
    tmp32 = tmp26 - tmp31
    tmp33 = tmp6.to(tl.float32)
    tmp34 = tmp5 - tmp33
    tmp35 = triton_helpers.maximum(tmp34, tmp4)
    tmp36 = triton_helpers.minimum(tmp35, tmp2)
    tmp37 = tmp32 * tmp36
    tmp38 = tmp31 + tmp37
    tl.store(out_ptr1 + (x7 + (64*x4)), tmp38, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/he/che6nd6gb4vfumohfsklsm5klcp45g4jlpyy34ahgnzt6epbx2ro.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat => cat
# Graph fragment:
#   %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add_4, %primals_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    x1 = (xindex // 16)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tl.store(out_ptr0 + (x0 + (64*x1)), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/p6/cp6kjjnbx6x6vx773fn74my6orskkocw7j5ex7lhq72nsdcqflhu.py
# Topologically Sorted Source Nodes: [conv2d, leaky_relu], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d => convolution
#   leaky_relu => gt, mul_5, where
# Graph fragment:
#   %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
#   %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
#   %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul_5), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, xmask)
    tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
    assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_4, (4, ), (1, ))
    assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_6, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = reinterpret_tensor(buf3, (4, 3, 4, 4), (64, 16, 4, 1), 0)  # alias
        # Topologically Sorted Source Nodes: [up_x], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(primals_2, buf1, 192, grid=grid(192), stream=stream0)
        del primals_2
        buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (64, 16, 4, 1), 48)  # alias
        # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
        triton_poi_fused_cat_1.run(primals_1, buf2, 64, grid=grid(64), stream=stream0)
        del primals_1
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [conv2d, leaky_relu], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_2.run(buf4, primals_4, buf5, buf6, 256, grid=grid(256), stream=stream0)
        del primals_4
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf9 = buf4; del buf4  # reuse
        # Topologically Sorted Source Nodes: [conv2d_1, leaky_relu_1], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_2.run(buf7, primals_6, buf8, buf9, 256, grid=grid(256), stream=stream0)
        del buf7
        del primals_6
    return (buf9, primals_3, primals_5, buf3, buf5, buf6, buf8, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
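One detail worth flagging: because the input and output spatial sizes are both 4x4 and `align_corners=True`, the bilinear source coordinates are exact integers, so every fractional weight in the kernel above evaluates to zero and the "interpolation" degenerates to a copy. A quick sketch of that degenerate case (illustration only, not part of the generated code):

import torch
import torch.nn.functional as F

x = torch.rand(4, 3, 4, 4)
up = F.interpolate(x, size=[4, 4], mode='bilinear', align_corners=True)
# Same-size bilinear resize with align_corners=True reproduces the input.
assert torch.allclose(up, x)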
import torch
import torch.nn.functional as F
import torch.nn as nn


class UpSample(nn.Sequential):

    def __init__(self, skip_input, output_features):
        super(UpSample, self).__init__()
        self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1)
        self.leakyreluA = nn.LeakyReLU(0.2)
        self.convB = nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1)
        self.leakyreluB = nn.LeakyReLU(0.2)

    def forward(self, x, concat_with):
        up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)],
            mode='bilinear', align_corners=True)
        return self.leakyreluB(self.convB(self.leakyreluA(self.convA(
            torch.cat([up_x, concat_with], dim=1)))))


def get_inputs():
    return [torch.rand([4, 3, 4, 4]), torch.rand([4, 1, 4, 4])]


def get_init_inputs():
    return [[], {'skip_input': 4, 'output_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x2 = xindex // 16
    x4 = xindex // 48
    x7 = xindex % 48
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 1.0
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 3, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tmp11 = x0
    tmp12 = tmp11.to(tl.float32)
    tmp13 = tmp12 * tmp2
    tmp14 = triton_helpers.maximum(tmp13, tmp4)
    tmp15 = tmp14.to(tl.int32)
    tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp17 = tmp15 + tmp7
    tmp18 = triton_helpers.minimum(tmp17, tmp9)
    tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp20 = tmp19 - tmp16
    tmp21 = tmp15.to(tl.float32)
    tmp22 = tmp14 - tmp21
    tmp23 = triton_helpers.maximum(tmp22, tmp4)
    tmp24 = triton_helpers.minimum(tmp23, tmp2)
    tmp25 = tmp20 * tmp24
    tmp26 = tmp16 + tmp25
    tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp29 = tmp28 - tmp27
    tmp30 = tmp29 * tmp24
    tmp31 = tmp27 + tmp30
    tmp32 = tmp26 - tmp31
    tmp33 = tmp6.to(tl.float32)
    tmp34 = tmp5 - tmp33
    tmp35 = triton_helpers.maximum(tmp34, tmp4)
    tmp36 = triton_helpers.minimum(tmp35, tmp2)
    tmp37 = tmp32 * tmp36
    tmp38 = tmp31 + tmp37
    tl.store(out_ptr1 + (x7 + 64 * x4), tmp38, xmask)


@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tl.store(out_ptr0 + (x0 + 64 * x1), tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
    assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = reinterpret_tensor(buf3, (4, 3, 4, 4), (64, 16, 4, 1), 0)
        get_raw_stream(0)
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
            (192)](primals_2, buf1, 192, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (64, 16, 4, 1), 48)
        triton_poi_fused_cat_1[grid(64)](primals_1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_1
        buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf4,
            primals_4, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_4
        buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf9 = buf4
        del buf4
        triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf7,
            primals_6, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf7
        del primals_6
    return buf9, primals_3, primals_5, buf3, buf5, buf6, buf8


class UpSampleNew(nn.Sequential):

    def __init__(self, skip_input, output_features):
        super(UpSampleNew, self).__init__()
        self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3,
            stride=1, padding=1)
        self.leakyreluA = nn.LeakyReLU(0.2)
        self.convB = nn.Conv2d(output_features, output_features,
            kernel_size=3, stride=1, padding=1)
        self.leakyreluB = nn.LeakyReLU(0.2)

    def forward(self, input_0, input_1):
        primals_3 = self.convA.weight
        primals_4 = self.convA.bias
        primals_5 = self.convB.weight
        primals_6 = self.convB.bias
        primals_2 = input_0
        primals_1 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
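A hedged parity sketch for this record (assumes CUDA and both class definitions in scope; note that `UpSampleNew.forward` takes `(x, concat_with)` in the same order as `get_inputs()` above):

import torch

torch.manual_seed(0)
eager = UpSample(skip_input=4, output_features=4).cuda()
compiled = UpSampleNew(skip_input=4, output_features=4).cuda()
compiled.load_state_dict(eager.state_dict())  # same convA/convB weights
x = torch.rand([4, 3, 4, 4], device='cuda')
concat_with = torch.rand([4, 1, 4, 4], device='cuda')
assert torch.allclose(eager(x, concat_with), compiled(x, concat_with), atol=1e-6)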
kimtaehyeong/msnnff
UpSample
false
12680
[ "MIT" ]
0
75586be601bbdbfafcdf4038bc08f239e119b417
https://github.com/kimtaehyeong/msnnff/tree/75586be601bbdbfafcdf4038bc08f239e119b417
nn_model
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/az/cazao7d5hdb3kcfc76acvd3yerra6cq3h4spci3xujm27v6xwinj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1024
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (1024, 4), (4, 1))
    assert_size_stride(primals_2, (1024, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1024, 1024), (1024, 1))
    assert_size_stride(primals_5, (1024, ), (1, ))
    assert_size_stride(primals_6, (4, 1024), (1024, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1024), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1024), (16384, 4096, 1024, 1), 0); del buf0  # reuse
        buf6 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 65536, grid=grid(65536), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0), reinterpret_tensor(primals_4, (1024, 1024), (1, 1024), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1024), (16384, 4096, 1024, 1), 0); del buf2  # reuse
        buf5 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 65536, grid=grid(65536), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0), reinterpret_tensor(primals_6, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf4)
        del primals_7
    return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0), reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0), primals_6, buf5, primals_4, buf6, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
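The 65536-element launches above come from the hidden activations: 64 flattened rows times 1024 features. Because that count is a multiple of the block size, the kernel carries no bounds mask (`tl.full([XBLOCK], True, tl.int1)`). The parameter shapes asserted in `call` also pin down the model size; a quick arithmetic check (illustration only, names are mine):

# weight + bias counts for the three Linear layers asserted in call():
def linear_params(fan_in, fan_out):
    return fan_in * fan_out + fan_out

total = linear_params(4, 1024) + linear_params(1024, 1024) + linear_params(1024, 4)
assert total == 1058820  # 5120 + 1049600 + 4100, roughly 1.06M parameters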
import torch
import torch.nn as nn
import torch.nn.functional as F


class nn_model(nn.Module):

    def __init__(self, feature_dim, num_classes):
        super(nn_model, self).__init__()
        self.l1 = nn.Linear(feature_dim, 1024)
        self.l2 = nn.Linear(1024, 1024)
        self.l3 = nn.Linear(1024, num_classes)

    def forward(self, x):
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = self.l3(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'feature_dim': 4, 'num_classes': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1024
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (1024, 4), (4, 1))
    assert_size_stride(primals_2, (1024,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1024, 1024), (1024, 1))
    assert_size_stride(primals_5, (1024,), (1,))
    assert_size_stride(primals_6, (4, 1024), (1024, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 1024), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1024), (16384, 4096,
            1024, 1), 0)
        del buf0
        buf6 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(65536)](buf1,
            primals_2, buf6, 65536, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0),
            reinterpret_tensor(primals_4, (1024, 1024), (1, 1024), 0),
            out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1024), (16384, 4096,
            1024, 1), 0)
        del buf2
        buf5 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1),
            torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(65536)](buf3,
            primals_5, buf5, 65536, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 1024),
            (1024, 1), 0), reinterpret_tensor(primals_6, (1024, 4), (1,
            1024), 0), alpha=1, beta=1, out=buf4)
        del primals_7
    return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0),
        reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0),
        primals_6, buf5, primals_4, buf6)


class nn_modelNew(nn.Module):

    def __init__(self, feature_dim, num_classes):
        super(nn_modelNew, self).__init__()
        self.l1 = nn.Linear(feature_dim, 1024)
        self.l2 = nn.Linear(1024, 1024)
        self.l3 = nn.Linear(1024, num_classes)

    def forward(self, input_0):
        primals_1 = self.l1.weight
        primals_2 = self.l1.bias
        primals_4 = self.l2.weight
        primals_5 = self.l2.bias
        primals_6 = self.l3.weight
        primals_7 = self.l3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
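And the usual hedged parity sketch for this record (assumes CUDA and both classes in scope):

import torch

torch.manual_seed(0)
eager = nn_model(feature_dim=4, num_classes=4).cuda()
compiled = nn_modelNew(feature_dim=4, num_classes=4).cuda()
compiled.load_state_dict(eager.state_dict())  # same l1/l2/l3 weights
x = torch.rand([4, 4, 4, 4], device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-6)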
kiankd/quicksand
nn_model
false
12681
[ "MIT" ]
0
20f9505c843eec00e423a0e1589ebd1e6264e174
https://github.com/kiankd/quicksand/tree/20f9505c843eec00e423a0e1589ebd1e6264e174
ConvMeanPool
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/zl/czlj6w7bgqv6v6jwtuat5tk6hqgjbqda2njfcgonmqvlxwg22wnk.py
# Topologically Sorted Source Nodes: [add, add_1, add_2, output_1], Original ATen: [aten.add, aten.div]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   add_2 => add_2
#   output_1 => div
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %slice_8), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %slice_12), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %slice_16), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 4), kwargs = {})
triton_poi_fused_add_div_0 = async_compile.triton('triton_poi_fused_add_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = (xindex // 2) % 2
    x4 = (xindex // 4)
    x2 = (xindex // 4) % 4
    x6 = xindex
    tmp0 = tl.load(in_ptr0 + ((2*x0) + (6*x1) + (9*x4)), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (3 + (2*x0) + (9*x4)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + (6*x1) + (9*x4)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (4 + (9*x4)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp3 + tmp1
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 + tmp1
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 + tmp1
    tmp11 = tmp8 + tmp10
    tmp12 = 0.25
    tmp13 = tmp11 * tmp12
    tl.store(out_ptr0 + (x6), tmp13, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
        buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add, add_1, add_2, output_1], Original ATen: [aten.add, aten.div]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_div_0.run(buf0, primals_2, buf1, 64, grid=grid(64), stream=stream0)
        del buf0
        del primals_2
    return (buf1, primals_1, primals_3, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class MyConvo2d(nn.Module):

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
                 stride=1, bias=True):
        super(MyConvo2d, self).__init__()
        self.he_init = he_init
        self.padding = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
                              padding=self.padding, bias=bias)

    def forward(self, input):
        output = self.conv(input)
        return output


class ConvMeanPool(nn.Module):

    def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
        super(ConvMeanPool, self).__init__()
        self.he_init = he_init
        self.conv = MyConvo2d(input_dim, output_dim, kernel_size,
                              he_init=self.he_init)

    def forward(self, input):
        output = self.conv(input)
        output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] +
                  output[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x4 = xindex // 4 x2 = xindex // 4 % 4 x6 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 6 * x1 + 9 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (3 + 2 * x0 + 9 * x4), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (1 + 6 * x1 + 9 * x4), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (4 + 9 * x4), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tmp12 = 0.25 tmp13 = tmp11 * tmp12 tl.store(out_ptr0 + x6, tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_0[grid(64)](buf0, primals_2, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del primals_2 return buf1, primals_1, primals_3 class MyConvo2d(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, he_init=True, stride=1, bias=True): super(MyConvo2d, self).__init__() self.he_init = he_init self.padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=self.padding, bias=bias) def forward(self, input): output = self.conv(input) return output class ConvMeanPoolNew(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, he_init=True): super(ConvMeanPoolNew, self).__init__() self.he_init = he_init self.conv = MyConvo2d(input_dim, output_dim, kernel_size, he_init= self.he_init) def forward(self, input_0): primals_1 = self.conv.conv.weight primals_2 = self.conv.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
kolchinski/humanception-score
ConvMeanPool
false
12682
[ "MIT" ]
0
da8880eec3be39574718409cfe8ca303f41c64e6
https://github.com/kolchinski/humanception-score/tree/da8880eec3be39574718409cfe8ca303f41c64e6
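For reference, the slice-and-average in ConvMeanPool.forward above is exactly what the fused triton_poi_fused_add_div_0 kernel computes after the external convolution. On even spatial sizes the four strided slices tile the plane, so the average equals a stride-2, 2x2 mean pool; with the odd 3x3 conv output in this record, the size-1 slices broadcast against the size-2 ones, which is why three of the kernel's loads drop an index term. A minimal sketch (shapes illustrative, not from the record) checking the even-size equivalence in plain PyTorch:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)  # even H and W so the four slices line up
pooled = (x[:, :, ::2, ::2] + x[:, :, 1::2, ::2] +
          x[:, :, ::2, 1::2] + x[:, :, 1::2, 1::2]) / 4
# On even inputs the slice average is exactly a stride-2, 2x2 mean pool.
assert torch.allclose(pooled, F.avg_pool2d(x, 2))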
Generator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), 
xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/u5/cu5ua3ykbptipkew3i3zng4a7a4hy4f6xs547ovdooepce7uyfwz.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = 
tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) del buf1 return (buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Generator(nn.Module):

    def __init__(self, hidden_size, output_size):
        super(Generator, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.out = nn.Linear(hidden_size, output_size)
        self.sm = nn.LogSoftmax(dim=1)

    def forward(self, inputs):
        assert inputs.size(1) == self.hidden_size
        return self.sm(self.out(inputs))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2 class GeneratorNew(nn.Module): def __init__(self, hidden_size, output_size): super(GeneratorNew, self).__init__() self.hidden_size = hidden_size self.output_size = output_size self.out = nn.Linear(hidden_size, output_size) self.sm = nn.LogSoftmax(dim=1) def forward(self, input_0): primals_2 = self.out.weight primals_3 = 
self.out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
kompotiks/Boris
Generator
false
12683
[ "Apache-2.0" ]
0
2cf9487e4bc8d81206f819c0fe5c1d793d554062
https://github.com/kompotiks/Boris/tree/2cf9487e4bc8d81206f819c0fe5c1d793d554062
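The two kernels in the Generator record above are the standard numerically stable two-pass log-softmax: the first subtracts the per-dim-1 maximum, the second subtracts the log-sum-exp of the shifted values. A short eager restatement (input shape taken from get_inputs, otherwise illustrative), checked against torch.log_softmax:

import torch

x = torch.randn(4, 4, 4, 4)
shifted = x - x.amax(dim=1, keepdim=True)  # kernel 0: amax + sub
out = shifted - shifted.exp().sum(dim=1, keepdim=True).log()  # kernel 1: exp, sum, log, sub
assert torch.allclose(out, torch.log_softmax(x, dim=1), atol=1e-06)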
AttentionGRUCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/lu/cluy2665mfjovqy3g6g6aluvvxdhqdcpfzmtdxndscg75r2od74w.py # Topologically Sorted Source Nodes: [add, r, mul, add_1, h_tilda, mul_1, sub, mul_2, h], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # h => add_2 # h_tilda => tanh # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # r => sigmoid # sub => sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_7), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %mul), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %tanh), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %expand), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_6), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex x0 = xindex % 4 x2 = xindex % 16 x4 = (xindex // 64) tmp0 = tl.load(in_out_ptr0 + (x5), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x5), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x2 + (16*x4)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (x5), xmask) tmp10 = tl.load(in_ptr5 + (x5), xmask) tmp17 = tl.load(in_ptr6 + (x5), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp11 = tmp7 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = libdevice.tanh(tmp12) tmp14 = tmp8 * tmp13 tmp15 = 1.0 tmp16 = tmp15 - tmp8 tmp18 = tmp16 * tmp17 tmp19 = tmp14 + tmp18 tl.store(in_out_ptr0 + (x5), tmp7, xmask) tl.store(out_ptr0 + (x5), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 
0), alpha=1, beta=1, out=buf3) del primals_7 del primals_8 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_10 del primals_9 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, r, mul, add_1, h_tilda, mul_1, sub, mul_2, h], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0.run(buf2, primals_2, buf1, primals_5, primals_11, buf3, buf4, primals_6, buf5, 256, grid=grid(256), stream=stream0) del buf1 del primals_2 del primals_5 return (buf5, primals_11, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf2, buf3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init


class AttentionGRUCell(nn.Module):

    def __init__(self, input_size, hidden_size):
        super(AttentionGRUCell, self).__init__()
        self.hidden_size = hidden_size
        self.Wr = nn.Linear(input_size, hidden_size)
        init.xavier_normal_(self.Wr.state_dict()['weight'])
        self.Ur = nn.Linear(hidden_size, hidden_size)
        init.xavier_normal_(self.Ur.state_dict()['weight'])
        self.W = nn.Linear(input_size, hidden_size)
        init.xavier_normal_(self.W.state_dict()['weight'])
        self.U = nn.Linear(hidden_size, hidden_size)
        init.xavier_normal_(self.U.state_dict()['weight'])

    def forward(self, fact, C, g):
        """
        fact.size() -> (#batch, #hidden = #embedding)
        c.size() -> (#hidden, ) -> (#batch, #hidden = #embedding)
        r.size() -> (#batch, #hidden = #embedding)
        h_tilda.size() -> (#batch, #hidden = #embedding)
        g.size() -> (#batch, )
        """
        r = F.sigmoid(self.Wr(fact) + self.Ur(C))
        h_tilda = F.tanh(self.W(fact) + r * self.U(C))
        g = g.unsqueeze(1).expand_as(h_tilda)
        h = g * h_tilda + (1 - g) * C
        return h


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex x0 = xindex % 4 x2 = xindex % 16 x4 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x5, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x5, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x2 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr4 + x5, xmask) tmp10 = tl.load(in_ptr5 + x5, xmask) tmp17 = tl.load(in_ptr6 + x5, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp11 = tmp7 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = libdevice.tanh(tmp12) tmp14 = tmp8 * tmp13 tmp15 = 1.0 tmp16 = tmp15 - tmp8 tmp18 = tmp16 * tmp17 tmp19 = tmp14 + tmp18 tl.store(in_out_ptr0 + x5, tmp7, xmask) tl.store(out_ptr0 + x5, tmp19, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf3) del primals_7 del primals_8 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf4) del primals_10 del primals_9 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf2, primals_2, buf1, primals_5, primals_11, buf3, buf4, 
primals_6, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_2 del primals_5 return buf5, primals_11, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf2, buf3, buf4 class AttentionGRUCellNew(nn.Module): def __init__(self, input_size, hidden_size): super(AttentionGRUCellNew, self).__init__() self.hidden_size = hidden_size self.Wr = nn.Linear(input_size, hidden_size) init.xavier_normal_(self.Wr.state_dict()['weight']) self.Ur = nn.Linear(hidden_size, hidden_size) init.xavier_normal_(self.Ur.state_dict()['weight']) self.W = nn.Linear(input_size, hidden_size) init.xavier_normal_(self.W.state_dict()['weight']) self.U = nn.Linear(hidden_size, hidden_size) init.xavier_normal_(self.U.state_dict()['weight']) def forward(self, input_0, input_1, input_2): primals_1 = self.Wr.weight primals_2 = self.Wr.bias primals_4 = self.Ur.weight primals_5 = self.Ur.bias primals_7 = self.W.weight primals_8 = self.W.bias primals_9 = self.U.weight primals_10 = self.U.bias primals_3 = input_0 primals_6 = input_1 primals_11 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
kirubarajan/Dynamic-Memory-Network-Plus
AttentionGRUCell
false
12684
[ "Apache-2.0" ]
0
0613287ef5a959c7b260afcea2c31afcfb0ea189
https://github.com/kirubarajan/Dynamic-Memory-Network-Plus/tree/0613287ef5a959c7b260afcea2c31afcfb0ea189
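After the four matmuls handled by extern_kernels, the single fused kernel in the AttentionGRUCell record evaluates the whole gated update r = sigmoid(Wr(fact) + Ur(C)), h_tilda = tanh(W(fact) + r * U(C)), h = g * h_tilda + (1 - g) * C in one pass. A minimal eager restatement of that update (layer sizes and batch shape illustrative, not from the record):

import torch
import torch.nn as nn

hidden = 4
Wr, Ur, W, U = (nn.Linear(hidden, hidden) for _ in range(4))
fact, C, g = torch.rand(2, hidden), torch.rand(2, hidden), torch.rand(2)

r = torch.sigmoid(Wr(fact) + Ur(C))       # tmp0..tmp7 in the kernel
h_tilda = torch.tanh(W(fact) + r * U(C))  # tmp9..tmp13
g = g.unsqueeze(1).expand_as(h_tilda)     # the broadcast load tmp8
h = g * h_tilda + (1 - g) * C             # tmp14..tmp19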
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/z4/cz4rdmnjzva3wxtwkxdq32ntpuxr4xa3itqmggsv52y455k5rfxs.py # Topologically Sorted Source Nodes: [mul, sub, mul_1, alpha, eq, pred, sub_1, pt, sub_2, pow_1, mul_2, ce, mul_3], Original ATen: [aten.mul, aten.rsub, aten.add, aten.eq, aten.sigmoid, aten.where, aten.pow, aten.binary_cross_entropy_with_logits] # Source node to ATen node mapping: # alpha => add # ce => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2 # eq => eq # mul => mul_1 # mul_1 => mul_2 # mul_2 => mul_3 # mul_3 => mul_4 # pow_1 => pow_1 # pred => sigmoid # pt => where # sub => sub_3 # sub_1 => sub_4 # sub_2 => sub_5 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.25), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 0.75), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg1_1, 1), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %sigmoid, %sub_4), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %where), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_5, 2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %pow_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, 
device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_2), kwargs = {}) triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0 = async_compile.triton('triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp9 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp5 = 0.75 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp8 = tmp0 == tmp3 tmp10 = tl.sigmoid(tmp9) tmp11 = tmp3 - tmp10 tmp12 = tl.where(tmp8, tmp10, tmp11) tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp7 * tmp14 tmp16 = tmp4 * tmp9 tmp17 = 0.0 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl_math.abs(tmp9) tmp20 = -tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = libdevice.log1p(tmp21) tmp23 = tmp18 - tmp22 tmp24 = tmp16 - tmp23 tmp25 = tmp15 * tmp24 tl.store(out_ptr0 + (x0), tmp25, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sub, mul_1, alpha, eq, pred, sub_1, pt, sub_2, pow_1, mul_2, ce, mul_3], Original ATen: [aten.mul, aten.rsub, aten.add, aten.eq, aten.sigmoid, aten.where, aten.pow, aten.binary_cross_entropy_with_logits] stream0 = get_raw_stream(0) triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
import torch.distributed
import torch.multiprocessing


class FocalLoss(nn.Module):
    """Focal Loss - https://arxiv.org/abs/1708.02002"""

    def __init__(self, alpha=0.25, gamma=2):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, pred_logits, target):
        pred = pred_logits.sigmoid()
        ce = F.binary_cross_entropy_with_logits(pred_logits, target,
                                                reduction='none')
        alpha = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
        pt = torch.where(target == 1, pred, 1 - pred)
        return alpha * (1.0 - pt) ** self.gamma * ce


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.cuda import torch.distributed import torch.multiprocessing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp9 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp5 = 0.75 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp8 = tmp0 == tmp3 tmp10 = tl.sigmoid(tmp9) tmp11 = tmp3 - tmp10 tmp12 = tl.where(tmp8, tmp10, tmp11) tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp7 * tmp14 tmp16 = tmp4 * tmp9 tmp17 = 0.0 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl_math.abs(tmp9) tmp20 = -tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = libdevice.log1p(tmp21) tmp23 = tmp18 - tmp22 tmp24 = tmp16 - tmp23 tmp25 = tmp15 * tmp24 tl.store(out_ptr0 + x0, tmp25, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0[ grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class FocalLossNew(nn.Module): """Focal Loss - https://arxiv.org/abs/1708.02002""" def __init__(self, alpha=0.25, gamma=2): super().__init__() self.alpha = alpha self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
krisk84/retinanet-examples
FocalLoss
false
12685
[ "BSD-3-Clause" ]
0
174d95f3aabe1746d105c66f87aa445607f4eab8
https://github.com/krisk84/retinanet-examples/tree/174d95f3aabe1746d105c66f87aa445607f4eab8
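A detail worth noting in the FocalLoss kernel above is the numerically stable binary-cross-entropy-with-logits form it inlines as tmp16..tmp24: (1 - y) * x - (min(0, x) - log1p(exp(-|x|))), which is algebraically max(x, 0) - x*y + log1p(exp(-|x|)). A quick check of that identity against the library call (sizes illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(1024)  # logits
y = torch.rand(1024)   # soft targets in [0, 1]
stable = (1 - y) * x - (torch.clamp(x, max=0.0) - torch.log1p(torch.exp(-x.abs())))
ref = F.binary_cross_entropy_with_logits(x, y, reduction='none')
assert torch.allclose(stable, ref, atol=1e-06)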
GlobalAveragePooling
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 
(16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x0), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.nn.functional as F


class GlobalAveragePooling(nn.Module):

    def __init__(self):
        super(GlobalAveragePooling, self).__init__()

    def forward(self, feat):
        num_channels = feat.size(1)
        return F.avg_pool2d(feat, (feat.size(2), feat.size(3))).view(-1,
            num_channels)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class GlobalAveragePoolingNew(nn.Module): def __init__(self): super(GlobalAveragePoolingNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
kristinakupf/FeatureLearningRotNet
GlobalAveragePooling
false
12,686
[ "MIT" ]
0
d495bcfaed3e7a3ca92b7434f8ad6d7584ab173d
https://github.com/kristinakupf/FeatureLearningRotNet/tree/d495bcfaed3e7a3ca92b7434f8ad6d7584ab173d
KLDLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/5v/c5vtwldilipwi3r3wx5rsxypbvfmsav7s7xybl5bf72d3sckrknw.py # Topologically Sorted Source Nodes: [add, pow_1, sub, exp, sub_1, sum_1, mul], Original ATen: [aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul] # Source node to ATen node mapping: # add => add # exp => exp # mul => mul # pow_1 => pow_1 # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %pow_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %exp), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -0.5), kwargs = {}) triton_per_fused_add_exp_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': 
True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.exp(tmp0) tmp7 = tmp5 - tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = -0.5 tmp12 = tmp10 * tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, pow_1, sub, exp, sub_1, sum_1, mul], Original ATen: [aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_exp_mul_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class KLDLoss(nn.Module): def forward(self, mu, logvar): return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
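The expression is the closed-form KL divergence between N(mu, diag(exp(logvar))) and the standard normal, KL = 0.5 * sum(mu^2 + sigma^2 - log(sigma^2) - 1); a minimal sketch checking that the module's fused form agrees with that closed form:

import torch

# Minimal sketch: -0.5 * sum(1 + logvar - mu^2 - exp(logvar)) is the
# closed-form KL(N(mu, sigma^2) || N(0, 1)) with logvar = log(sigma^2).
mu = torch.rand(4, 4, 4, 4)
logvar = torch.rand(4, 4, 4, 4)
sigma2 = logvar.exp()
closed_form = 0.5 * torch.sum(mu.pow(2) + sigma2 - logvar - 1)
fused = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
assert torch.allclose(closed_form, fused)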
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.exp(tmp0) tmp7 = tmp5 - tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = -0.5 tmp12 = tmp10 * tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_exp_mul_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class KLDLossNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
kudoNCT/michigan_copy
KLDLoss
false
12,687
[ "MIT" ]
0
e857b96a65b270ef2506cb9866b7e01f117c4396
https://github.com/kudoNCT/michigan_copy/tree/e857b96a65b270ef2506cb9866b7e01f117c4396
GatedMaskedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/lk/clkvmfnc5ppj6ffcl2v3tvac6pbvz3y7mizzrdi65zokiejjhua3.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # pad => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 5 x0 = xindex % 6 x2 = (xindex // 30) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = (-1) + x0 
tmp4 = tmp3 >= tmp1 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp3 < tmp5 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pj/cpjc45ogatjtexy7be6gijiwluzdhthxjvcnnqmgqlinbpzrsmbi.py # Topologically Sorted Source Nodes: [conv2d, pad_1], Original ATen: [aten.convolution, aten.constant_pad_nd] # Source node to ATen node mapping: # conv2d => convolution # pad_1 => constant_pad_nd_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %constant_pad_nd_1 : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution, [0, 0, 1, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_convolution_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 5 x4 = (xindex // 20) x5 = xindex % 20 x2 = (xindex // 20) % 8 x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + ((-4) + x5 + (16*x4)), tmp2 & xmask, other=0.0) tmp4 = tl.load(in_ptr1 + (x2), tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 + tmp4 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp2, tmp5, tmp6) tl.store(out_ptr0 + (x6), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/es/ceshmhutmeo5xl2yl3dxwxrpje5njiwcfwwoj52wrkr3mcl5lhub.py # Topologically Sorted Source Nodes: [tanh, sigmoid, v_map_out], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # sigmoid => sigmoid # tanh => tanh # v_map_out => mul # Graph fragment: # %tanh : [num_users=1] = 
call_function[target=torch.ops.aten.tanh.default](args = (%slice_6,), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_8,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) % 4 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (20*x1) + (160*x2)), xmask) tmp2 = tl.load(in_ptr0 + (80 + x0 + (20*x1) + (160*x2)), xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/4b/c4bwymcjfsik2yzrr5le4v2j3mroi4pfhlquggnmpgqwjxzshdbs.py # Topologically Sorted Source Nodes: [pad_2], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # pad_2 => constant_pad_nd_2 # Graph fragment: # %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_6, [1, 0, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_3 = async_compile.triton('triton_poi_fused_constant_pad_nd_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = (-1) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1)), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/md/cmdddio6ujcwbhy6hgzu6huywjuoh3klsp37w5ygk2v4gwwrdoev.py # Topologically Sorted Source Nodes: [tanh_1, sigmoid_1, h_out_2], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # h_out_2 => mul_1 # sigmoid_1 => sigmoid_1 # tanh_1 => tanh_1 # Graph fragment: # %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%slice_10,), kwargs = {}) # %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_12,), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_1, %sigmoid_1), kwargs = {}) triton_poi_fused_mul_sigmoid_tanh_4 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, 
out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 64) x4 = xindex % 64 x1 = (xindex // 16) % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x4 + (128*x2)), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + (128*x2)), xmask) tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (64 + x4 + (128*x2)), xmask) tmp9 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (64 + x4 + (128*x2)), xmask) tmp12 = tl.load(in_ptr3 + (4 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp7 * tmp15 tl.store(out_ptr0 + (x3), tmp7, xmask) tl.store(out_ptr1 + (x3), tmp15, xmask) tl.store(out_ptr2 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ti/ctitm3fmik35mxgmabf5id22xrqrvhqyrxpmktt2s2eg77n2c7xt.py # Topologically Sorted Source Nodes: [h_map_out, h_map_out_1], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # h_map_out => convolution_3 # h_map_out_1 => add_1 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_1, %primals_9, %primals_10, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_3, %primals_6), kwargs = {}) triton_poi_fused_add_convolution_5 = async_compile.triton('triton_poi_fused_add_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 2, 3), (24, 6, 3, 1)) assert_size_stride(primals_3, (8, ), (1, )) assert_size_stride(primals_4, (8, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_5, (8, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (8, 4, 1, 2), (8, 2, 2, 1)) assert_size_stride(primals_8, (8, ), (1, )) assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 480, grid=grid(480), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 4, 4), (128, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 5, 4), (160, 20, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, pad_1], Original ATen: [aten.convolution, aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_convolution_1.run(buf1, primals_3, buf2, 640, grid=grid(640), stream=stream0) del buf1 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, sigmoid, v_map_out], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_tanh_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [vh], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 8, 4, 4), (160, 20, 4, 1), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 4, 4), (128, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [pad_2], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_3.run(primals_6, buf5, 320, grid=grid(320), stream=stream0) # Topologically Sorted Source Nodes: [h_out], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 4, 4), (128, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh_1, sigmoid_1, h_out_2], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_tanh_4.run(buf6, primals_8, buf4, primals_5, buf7, buf8, buf9, 256, grid=grid(256), stream=stream0) del buf4 del buf6 del primals_5 del primals_8 # Topologically Sorted 
Source Nodes: [h_map_out], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [h_map_out, h_map_out_1], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_5.run(buf11, primals_10, primals_6, 256, grid=grid(256), stream=stream0) del primals_10 del primals_6 return (buf3, buf11, primals_2, primals_4, primals_7, primals_9, buf0, reinterpret_tensor(buf2, (4, 8, 4, 4), (160, 20, 4, 1), 0), buf5, buf7, buf8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4, 2, 3), (24, 6, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, 4, 1, 2), (8, 2, 2, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data from torch import nn import torch.nn.functional as F class GatedMaskedConv2d(nn.Module): def __init__(self, in_dim, out_dim=None, kernel_size=3, mask='B'): super(GatedMaskedConv2d, self).__init__() if out_dim is None: out_dim = in_dim self.dim = out_dim self.size = kernel_size self.mask = mask pad = self.size // 2 self.v_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(pad + 1, self.size)) self.v_pad1 = nn.ConstantPad2d((pad, pad, pad, 0), 0) self.v_pad2 = nn.ConstantPad2d((0, 0, 1, 0), 0) self.vh_conv = nn.Conv2d(2 * self.dim, 2 * self.dim, kernel_size=1) self.h_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(1, pad + 1)) self.h_pad1 = nn.ConstantPad2d((self.size // 2, 0, 0, 0), 0) self.h_pad2 = nn.ConstantPad2d((1, 0, 0, 0), 0) self.h_conv_res = nn.Conv2d(self.dim, self.dim, 1) def forward(self, v_map, h_map): v_out = self.v_pad2(self.v_conv(self.v_pad1(v_map)))[:, :, :-1, :] v_map_out = F.tanh(v_out[:, :self.dim]) * F.sigmoid(v_out[:, self.dim:] ) vh = self.vh_conv(v_out) h_out = self.h_conv(self.h_pad1(h_map)) if self.mask == 'A': h_out = self.h_pad2(h_out)[:, :, :, :-1] h_out = h_out + vh h_out = F.tanh(h_out[:, :self.dim]) * F.sigmoid(h_out[:, self.dim:]) h_map_out = self.h_conv_res(h_out) if self.mask == 'B': h_map_out = h_map_out + h_map return v_map_out, h_map_out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4}]
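The module is the PixelCNN-style gated unit: each stack produces 2*dim channels that are split into a tanh half and a sigmoid half and multiplied elementwise, and with mask 'B' the horizontal stack adds a residual connection. A hedged shape-level usage sketch, assuming the class above is in scope and using in_dim=4 as in get_init_inputs:

import torch

# Hedged usage sketch: both stacks preserve the spatial size; the gated
# unit splits 2*dim channels into tanh and sigmoid halves and multiplies.
layer = GatedMaskedConv2d(in_dim=4)  # class defined above
v_map = torch.rand(4, 4, 4, 4)
h_map = torch.rand(4, 4, 4, 4)
v_out, h_out = layer(v_map, h_map)
assert v_out.shape == h_out.shape == (4, 4, 4, 4)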
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 5 x0 = xindex % 6 x2 = xindex // 30 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -1 + x0 tmp4 = tmp3 >= tmp1 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp3 < tmp5 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 5 x4 = xindex // 20 x5 = xindex % 20 x2 = xindex // 20 % 8 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-4 + x5 + 16 * x4), tmp2 & xmask, other=0.0) tmp4 = tl.load(in_ptr1 + x2, tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 + tmp4 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp2, tmp5, tmp6) tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 20 * x1 + 160 * x2), xmask) tmp2 = tl.load(in_ptr0 + (80 + x0 + 20 * x1 + 160 * x2), xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 64 x4 = xindex % 64 x1 = xindex // 16 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 128 * x2), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 128 * x2), xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (64 + x4 + 128 * x2), xmask) tmp9 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (64 + x4 + 128 * x2), xmask) tmp12 = tl.load(in_ptr3 + (4 + x1), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp7 * tmp15 tl.store(out_ptr0 + x3, tmp7, xmask) tl.store(out_ptr1 + x3, tmp15, xmask) tl.store(out_ptr2 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_add_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 2, 3), (24, 6, 3, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (8, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (8, 4, 1, 2), (8, 2, 2, 1)) assert_size_stride(primals_8, (8,), (1,)) assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(480)](primals_1, buf0, 480, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 4, 4), (128, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 5, 4), (160, 20, 4, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_1[grid(640)](buf1, primals_3, buf2, 640, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_tanh_2[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 8, 4, 4), (160, 20, 4, 1), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 4, 4), (128, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) triton_poi_fused_constant_pad_nd_3[grid(320)](primals_6, buf5, 320, XBLOCK=256, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 4, 4), (128, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_tanh_4[grid(256)](buf6, primals_8, buf4, primals_5, buf7, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del buf6 del primals_5 del primals_8 buf10 = extern_kernels.convolution(buf9, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_add_convolution_5[grid(256)](buf11, primals_10, primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_10 del primals_6 return (buf3, buf11, primals_2, primals_4, primals_7, primals_9, buf0, reinterpret_tensor(buf2, (4, 8, 4, 4), (160, 20, 4, 1), 0), buf5, buf7, buf8, buf9) class GatedMaskedConv2dNew(nn.Module): def __init__(self, in_dim, out_dim=None, kernel_size=3, mask='B'): super(GatedMaskedConv2dNew, self).__init__() if out_dim is None: out_dim = in_dim self.dim = out_dim self.size = kernel_size self.mask = mask pad = self.size // 2 self.v_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(pad + 1, self.size)) self.v_pad1 = nn.ConstantPad2d((pad, pad, pad, 0), 0) self.v_pad2 = nn.ConstantPad2d((0, 0, 1, 0), 0) self.vh_conv = nn.Conv2d(2 * self.dim, 2 * self.dim, kernel_size=1) self.h_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(1, pad + 1)) self.h_pad1 = nn.ConstantPad2d((self.size // 2, 0, 0, 0), 0) self.h_pad2 = nn.ConstantPad2d((1, 0, 0, 0), 0) self.h_conv_res = nn.Conv2d(self.dim, self.dim, 1) def forward(self, input_0, input_1): primals_2 = self.v_conv.weight primals_3 = self.v_conv.bias primals_4 = self.vh_conv.weight primals_5 = self.vh_conv.bias primals_7 = self.h_conv.weight primals_8 = self.h_conv.bias primals_9 = self.h_conv_res.weight primals_10 = self.h_conv_res.bias primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0], output[1]
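A hedged parity check against the eager module (assumes both classes are in scope and CUDA is available); since each module initializes its convolutions randomly, the weights are shared via load_state_dict before comparing:

import torch

# Hedged parity check: share weights first, then compare both outputs
# (v_map_out, h_map_out) of the eager and compiled gated blocks.
if torch.cuda.is_available():
    eager = GatedMaskedConv2d(in_dim=4).cuda()
    compiled = GatedMaskedConv2dNew(in_dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    v = torch.rand(4, 4, 4, 4, device='cuda')
    h = torch.rand(4, 4, 4, 4, device='cuda')
    for ref, out in zip(eager(v, h), compiled(v, h)):
        assert torch.allclose(ref, out, atol=1e-6)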
kj141/vae-lagging-encoder
GatedMaskedConv2d
false
12,688
[ "MIT" ]
0
79dda8baed0129bc8234b7602332a54210164fbc
https://github.com/kj141/vae-lagging-encoder/tree/79dda8baed0129bc8234b7602332a54210164fbc
DuelingDQN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/hy/chyn4ucj7uhqavrcrhxk2c5izzfdiw63bn3glmpyn3tpx5bpigdc.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] # Source node to ATen node mapping: # x => relu # Graph fragment: # %add_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_4, %primals_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_4,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/b7/cb7yiqdigd2vu5it7f2y6axob3bgvkx2ecs3nmymezsrlxsu2jhl.py # Topologically Sorted Source Nodes: [v], Original ATen: [aten.relu] # Source node to ATen node mapping: # v => relu_2 # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pf/cpfaffniytsvapzbt4cn2nrmt3t4gkebunfzmckizel5dlo2qgjo.py # Topologically Sorted Source Nodes: [add, x_2], Original ATen: [aten.add, aten.sub] # Source node to ATen node mapping: # add => add # x_2 => sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %addmm_5), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %expand_1), kwargs = {}) triton_poi_fused_add_sub_2 = async_compile.triton('triton_poi_fused_add_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (x2), xmask) tmp6 = tl.load(in_ptr2 + (4*x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp12 = tmp10 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = tmp5 - tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (32, 128), (128, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (1, 32), (32, 1)) assert_size_stride(primals_9, (1, ), (1, )) assert_size_stride(primals_10, (32, 128), (128, 1)) assert_size_stride(primals_11, (32, ), (1, )) assert_size_stride(primals_12, (4, 32), (32, 1)) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_2, 512, grid=grid(512), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, 
reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf3, primals_5, 512, grid=grid(512), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (128, 32), (1, 128), 0), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [v], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf5, primals_7, 128, grid=grid(128), stream=stream0) del primals_7 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (32, 1), (1, 32), 0), out=buf6) buf7 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf3, reinterpret_tensor(primals_10, (128, 32), (1, 128), 0), out=buf7) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [a], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf8, primals_11, 128, grid=grid(128), stream=stream0) del primals_11 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf8, reinterpret_tensor(primals_12, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf9) del primals_13 buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, x_2], Original ATen: [aten.add, aten.sub] triton_poi_fused_add_sub_2.run(buf6, primals_9, buf9, buf10, 16, grid=grid(16), stream=stream0) del buf6 del buf9 del primals_9 return (buf10, primals_3, buf1, buf3, buf5, buf8, primals_12, primals_10, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((32, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class DuelingDQN(nn.Module): def __init__(self, state_size, action_size, seed): super(DuelingDQN, self).__init__() torch.manual_seed(seed) self.state_size = state_size self.action_size = action_size self.fc1 = nn.Linear(state_size, 128) self.fc2 = nn.Linear(128, 128) self.value_fc1 = nn.Linear(128, 32) self.value_activation = nn.Linear(32, 1) self.advantage_fc1 = nn.Linear(128, 32) self.advantage_activation = nn.Linear(32, action_size) def forward(self, state): x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) v = F.relu(self.value_fc1(x)) v = self.value_activation(v).expand(x.size(0), self.action_size) a = F.relu(self.advantage_fc1(x)) a = self.advantage_activation(a) x = v + a - a.mean(1).unsqueeze(1).expand(x.size(0), self.action_size) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
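The dueling aggregation is Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a'), which centers the advantage stream so V and A are identifiable. One checkable consequence: the per-state mean of Q equals V, so Q minus its per-state mean averages to zero over actions. A minimal sketch, assuming the class above is in scope:

import torch

# Minimal sketch: with Q = V + (A - mean(A)), the per-state mean of Q
# equals V, so the re-centered advantages average to zero.
net = DuelingDQN(state_size=4, action_size=4, seed=4)  # class above
q = net(torch.rand(4, 4))
adv = q - q.mean(dim=1, keepdim=True)
assert torch.allclose(adv.mean(dim=1), torch.zeros(4), atol=1e-6)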
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + x2, xmask) tmp6 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp12 = tmp10 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = tmp5 - tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (32, 128), (128, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (1, 32), (32, 1)) assert_size_stride(primals_9, (1,), (1,)) assert_size_stride(primals_10, (32, 128), (128, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (4, 32), (32, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(512)](buf1, primals_2, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = 
empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (128, 128), ( 1, 128), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(512)](buf3, primals_5, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (128, 32), (1, 128), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_1[grid(128)](buf5, primals_7, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (32, 1), (1, 32), 0), out=buf6) buf7 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_10, (128, 32), ( 1, 128), 0), out=buf7) buf8 = buf7 del buf7 triton_poi_fused_relu_1[grid(128)](buf8, primals_11, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_11 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, buf8, reinterpret_tensor( primals_12, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf9) del primals_13 buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_sub_2[grid(16)](buf6, primals_9, buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf6 del buf9 del primals_9 return (buf10, primals_3, buf1, buf3, buf5, buf8, primals_12, primals_10, primals_8, primals_6, primals_4) class DuelingDQNNew(nn.Module): def __init__(self, state_size, action_size, seed): super(DuelingDQNNew, self).__init__() torch.manual_seed(seed) self.state_size = state_size self.action_size = action_size self.fc1 = nn.Linear(state_size, 128) self.fc2 = nn.Linear(128, 128) self.value_fc1 = nn.Linear(128, 32) self.value_activation = nn.Linear(32, 1) self.advantage_fc1 = nn.Linear(128, 32) self.advantage_activation = nn.Linear(32, action_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.value_fc1.weight primals_7 = self.value_fc1.bias primals_8 = self.value_activation.weight primals_9 = self.value_activation.bias primals_10 = self.advantage_fc1.weight primals_11 = self.advantage_fc1.bias primals_12 = self.advantage_activation.weight primals_13 = self.advantage_activation.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
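# Hedged usage sketch (assumption: a CUDA device is available; the compiled
# wrapper allocates every buffer with empty_strided_cuda, so the generated
# *New classes are CUDA-only). A guarded smoke test for the compiled module
# above:
import torch

def smoke_test_dueling_dqn_new():
    if not torch.cuda.is_available():
        return None                           # compiled path requires CUDA
    model = DuelingDQNNew(state_size=4, action_size=4, seed=4).cuda()
    q = model(torch.rand(4, 4, device='cuda'))
    assert q.shape == (4, 4)                  # one Q-value per (sample, action)
    return q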
kscharpf/drlnd_p1_navigation
DuelingDQN
false
12689
[ "MIT" ]
0
7f5e2aebcabb9d94c45a2fa7e9e8baec5c4b7a00
https://github.com/kscharpf/drlnd_p1_navigation/tree/7f5e2aebcabb9d94c45a2fa7e9e8baec5c4b7a00
SmoothL1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/6j/c6jfepznpyw27icx3r43btm73cvmjgsugaqhhbluj37jhqqgggsz.py # Topologically Sorted Source Nodes: [sub, x, ge, l1, pow_1, mul, l2, where], Original ATen: [aten.sub, aten.abs, aten.ge, aten.pow, aten.mul, aten.div, aten.where] # Source node to ATen node mapping: # ge => ge # l1 => sub_1 # l2 => div # mul => mul # pow_1 => pow_1 # sub => sub # where => where # x => abs_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%abs_1, 0.11), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.055), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 0.11), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ge, %sub_1, %div), kwargs = {}) triton_poi_fused_abs_div_ge_mul_pow_sub_where_0 = async_compile.triton('triton_poi_fused_abs_div_ge_mul_pow_sub_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_abs_div_ge_mul_pow_sub_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_div_ge_mul_pow_sub_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 0.11 tmp5 = tmp3 >= tmp4 tmp6 = 0.055 tmp7 = tmp3 - tmp6 tmp8 = tmp3 * tmp3 tmp9 = 0.5 tmp10 = tmp8 * tmp9 tmp11 = 9.090909090909092 tmp12 = tmp10 * tmp11 tmp13 = tl.where(tmp5, tmp7, tmp12) tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, x, ge, l1, pow_1, mul, l2, where], Original ATen: [aten.sub, aten.abs, aten.ge, aten.pow, aten.mul, aten.div, aten.where] stream0 = get_raw_stream(0) triton_poi_fused_abs_div_ge_mul_pow_sub_where_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.cuda import torch.distributed import torch.multiprocessing class SmoothL1Loss(nn.Module): """Smooth L1 Loss""" def __init__(self, beta=0.11): super().__init__() self.beta = beta def forward(self, pred, target): x = (pred - target).abs() l1 = x - 0.5 * self.beta l2 = 0.5 * x ** 2 / self.beta return torch.where(x >= self.beta, l1, l2) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
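# Hedged sketch (added commentary): with beta = 0.11, the constants folded
# into the Triton kernel are 0.5 * beta = 0.055 (the L1 shift) and
# 1 / beta = 9.090909... (the division rewritten as a multiply). The module
# is elementwise smooth L1, so it should agree with PyTorch's built-in loss;
# a minimal check, assuming the SmoothL1Loss class above is in scope:
import torch
import torch.nn.functional as F

def check_smooth_l1(beta=0.11):
    torch.manual_seed(0)
    pred, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    ours = SmoothL1Loss(beta)(pred, target)
    ref = F.smooth_l1_loss(pred, target, reduction='none', beta=beta)
    assert torch.allclose(ours, ref, atol=1e-6)
    return ours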
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.cuda import torch.distributed import torch.multiprocessing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_div_ge_mul_pow_sub_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 0.11 tmp5 = tmp3 >= tmp4 tmp6 = 0.055 tmp7 = tmp3 - tmp6 tmp8 = tmp3 * tmp3 tmp9 = 0.5 tmp10 = tmp8 * tmp9 tmp11 = 9.090909090909092 tmp12 = tmp10 * tmp11 tmp13 = tl.where(tmp5, tmp7, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_div_ge_mul_pow_sub_where_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SmoothL1LossNew(nn.Module): """Smooth L1 Loss""" def __init__(self, beta=0.11): super().__init__() self.beta = beta def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
krisk84/retinanet-examples
SmoothL1Loss
false
12690
[ "BSD-3-Clause" ]
0
174d95f3aabe1746d105c66f87aa445607f4eab8
https://github.com/krisk84/retinanet-examples/tree/174d95f3aabe1746d105c66f87aa445607f4eab8
GELU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/nh/cnhx37tsffx4r7taj3xi72s7yfpnnccem24fupfbht6b7bzliavu.py # Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu] # Source node to ATen node mapping: # gelu => add, erf, mul, mul_1, mul_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_gelu_0 = async_compile.triton('triton_poi_fused_gelu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu] stream0 = get_raw_stream(0) triton_poi_fused_gelu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.nn import functional as F class GELU(nn.Module): def forward(self, input): return F.gelu(input) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
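# Hedged sketch (illustrative only): the kernel lowers exact (erf-based)
# GELU, GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))), with the literal
# 0.7071067811865476 = 1/sqrt(2). A quick eager-mode equivalence check:
import torch
import torch.nn.functional as F

def check_gelu():
    torch.manual_seed(0)
    x = torch.randn(4, 4, 4, 4)
    exact = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
    assert torch.allclose(exact, F.gelu(x), atol=1e-6)
    return exact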
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class GELUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
kwonyos/decision-transformer
GELU
false
12691
[ "MIT" ]
0
c3ad7df28a897a016dd24c5337cb871d1f33f456
https://github.com/kwonyos/decision-transformer/tree/c3ad7df28a897a016dd24c5337cb871d1f33f456
WeightedFeatureFusion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/fs/cfs4xkoaiu25wxx5ko6j355loos24sadbddfu2644hsgmchy36go.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add] # Source node to ATen node mapping: # x => add # x_1 => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %select), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %select_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (256 + x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tmp2 + tmp1 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (5, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((5, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class WeightedFeatureFusion(nn.Module): def __init__(self, layers, weight=False): super(WeightedFeatureFusion, self).__init__() self.layers = layers self.weight = weight self.n = len(layers) + 1 if weight: self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True) def forward(self, x, outputs): if self.weight: w = torch.sigmoid(self.w) * (2 / self.n) x = x * w[0] nx = x.shape[1] for i in range(self.n - 1): a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[ self.layers[i]] na = a.shape[1] if nx == na: x = x + a elif nx > na: x[:, :na] = x[:, :na] + a else: x = x + a[:, :nx] return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([5, 4, 4, 4])] def get_init_inputs(): return [[], {'layers': [4, 4]}]
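# Hedged sketch (added for illustration): with layers=[4, 4] and
# weight=False, both loop iterations add outputs[4] (shape (4, 4, 4),
# broadcast over the batch dimension) to x, so the fused kernel computes
# x + outputs[4] + outputs[4] in a single pass. An eager-mode reproduction:
import torch

def check_weighted_fusion():
    torch.manual_seed(0)
    x = torch.rand(4, 4, 4, 4)
    outputs = torch.rand(5, 4, 4, 4)          # outputs[4] is the fused map
    ref = WeightedFeatureFusion(layers=[4, 4])(x.clone(), outputs)
    assert torch.allclose(ref, x + outputs[4] + outputs[4])
    return ref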
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (256 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tmp2 + tmp1 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (5, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class WeightedFeatureFusionNew(nn.Module): def __init__(self, layers, weight=False): super(WeightedFeatureFusionNew, self).__init__() self.layers = layers self.weight = weight self.n = len(layers) + 1 if weight: self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ks1320/Traffic-Surveillance-System
WeightedFeatureFusion
false
12692
[ "MIT" ]
0
fa1eb2a3a3d494c798fa2eeb0528ef48b1978332
https://github.com/ks1320/Traffic-Surveillance-System/tree/fa1eb2a3a3d494c798fa2eeb0528ef48b1978332
Reorg
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/l5/cl5b6spvyoawk3rk5eatjxs6crkxt5h56dutf76hem45gsxd2mev.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_2, %slice_4, %slice_6, %slice_8], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 4) % 16 x0 = xindex % 2 x1 = (xindex // 2) % 2 x3 = (xindex // 64) x4 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 
((2*x0) + (8*x1) + (16*x2) + (64*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1) + (16*((-4) + x2)) + (64*x3)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1) + (16*((-8) + x2)) + (64*x3)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1) + (16*((-12) + x2)) + (64*x3)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x4), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class Reorg(nn.Module): def forward(self, x): return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
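# Hedged sketch (illustrative): the reorg op is a space-to-depth
# rearrangement that trades each 2x2 spatial block for 4x as many channels,
# so a (4, 4, 4, 4) input becomes (4, 16, 2, 2). Its channel ordering groups
# by sub-grid offset (::2 / 1::2), so it is not byte-for-byte identical to
# F.pixel_unshuffle, but the shape contract is the same:
import torch

def check_reorg_shape():
    torch.manual_seed(0)
    x = torch.rand(4, 4, 4, 4)
    y = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2],
                   x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
    assert y.shape == (4, 16, 2, 2)
    assert y.shape == torch.nn.functional.pixel_unshuffle(x, 2).shape
    return y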
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 4 % 16 x0 = xindex % 2 x1 = xindex // 2 % 2 x3 = xindex // 64 x4 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 * x3), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1 + 16 * (-8 + x2) + 64 * x3), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1 + 16 * (-12 + x2) + 64 * x3), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x4, tmp22, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ReorgNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ks1320/Traffic-Surveillance-System
Reorg
false
12693
[ "MIT" ]
0
fa1eb2a3a3d494c798fa2eeb0528ef48b1978332
https://github.com/ks1320/Traffic-Surveillance-System/tree/fa1eb2a3a3d494c798fa2eeb0528ef48b1978332
GCN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/l4/cl4boort6vfsvh6h6bfd4lck36kbmtipkqcrnhckuuxer6sfib77.py # Topologically Sorted Source Nodes: [temp_1], Original ATen: [aten.zeros] # Source node to ATen node mapping: # temp_1 => full # Graph fragment: # %full : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_zeros_0 = async_compile.triton('triton_poi_fused_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/wq/cwqkfc7efcgiuv6rsa3stkinyzeft7fq5wl4uyfa53emahjnunte.py # 
Topologically Sorted Source Nodes: [temp_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # temp_3 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [temp_1], Original ATen: [aten.zeros] stream0 = get_raw_stream(0) triton_poi_fused_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [temp_1], Original ATen: [aten._sparse_addmm] buf1 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (4, 4), (1, 4), 0), 
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0) del primals_1 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf3) del primals_3 buf4 = buf3; del buf3 # reuse buf8 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [temp_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf4, primals_4, buf8, 16, grid=grid(16), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [temp_5], Original ATen: [aten._sparse_addmm] buf5 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(buf4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0) del buf0 buf6 = buf5 del buf5 buf7 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [temp_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf6, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_6 return (buf7, reinterpret_tensor(buf2, (4, 4), (1, 4), 0), reinterpret_tensor(buf6, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), primals_2, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class GCN(nn.Module): def __init__(self, dim_nd, dim_ft, dim_hd, dim_ot, drop_rate=0.5): super(GCN, self).__init__() self.lin1 = nn.Linear(dim_ft, dim_hd) self.lin2 = nn.Linear(dim_hd, dim_ot) self.act1 = F.relu self.act2 = nn.Softmax self.drop1 = nn.Dropout(p=drop_rate) self.drop2 = nn.Dropout(p=drop_rate) def forward(self, A, X): temp = self.drop1(X) temp = torch.sparse.mm(A, temp) temp = self.lin1(temp) temp = self.act1(temp) temp = self.drop2(temp) temp = torch.sparse.mm(A, temp) temp = self.lin2(temp) output = temp return output def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'dim_nd': 4, 'dim_ft': 4, 'dim_hd': 4, 'dim_ot': 4}]
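# Hedged sketch (added commentary): each GCN layer computes
# H' = act(A @ H @ W + b); the compiled graph realizes the sparse product
# A @ H through torch.ops.aten._sparse_addmm against a zero buffer. Note
# that self.act2 = nn.Softmax above stores the class rather than an
# instance; it is unused, but it would need to be nn.Softmax(dim=1) before
# it could be applied. A dense eager-mode equivalent of the forward pass
# (dropout omitted, i.e. eval mode):
import torch
import torch.nn.functional as F

def gcn_forward_dense(A, X, lin1, lin2):
    h = F.relu(lin1(A @ X))                   # propagate, affine, ReLU
    return lin2(A @ h)                        # propagate, affine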
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4 ), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0) del primals_1 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf3) del primals_3 buf4 = buf3 del buf3 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf4, primals_4, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf5 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(buf4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0) del buf0 buf6 = buf5 del buf5 buf7 = buf4 del buf4 extern_kernels.addmm(primals_6, reinterpret_tensor(buf6, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf7) del primals_6 return buf7, reinterpret_tensor(buf2, (4, 4), (1, 4), 0 ), reinterpret_tensor(buf6, (4, 4), (1, 4), 0), reinterpret_tensor( primals_5, (4, 4), (1, 4), 0), primals_2, buf8 class GCNNew(nn.Module): def __init__(self, dim_nd, dim_ft, dim_hd, dim_ot, drop_rate=0.5): super(GCNNew, self).__init__() self.lin1 = nn.Linear(dim_ft, dim_hd) self.lin2 = nn.Linear(dim_hd, dim_ot) self.act1 = F.relu self.act2 = nn.Softmax self.drop1 = nn.Dropout(p=drop_rate) self.drop2 = nn.Dropout(p=drop_rate) def forward(self, input_0, input_1): primals_1 = self.lin1.weight 
primals_4 = self.lin1.bias primals_2 = self.lin2.weight primals_6 = self.lin2.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
lanseyege/Graph
GCN
false
12694
[ "MIT" ]
0
ec94502ea59d2b68de095d8160f37aa22d26f8cb
https://github.com/lanseyege/Graph/tree/ec94502ea59d2b68de095d8160f37aa22d26f8cb
DQN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/sn/csnsms5tdtjok5uxcwcbko2ioqfann3pwnmkfhlujgvnsujd5bud.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 156800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 1225) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/f4/cf4q74veoggsxdgdkl43ap6cyqfylpfk3qs7wdqoebyfzzb36dvw.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jd/cjdph23oasfased5f2dfu7kch7qcwjhegz6fxsrsn22yzjy3qj2u.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : 
[num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 196) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/b5/cb5bmriikeb3z65rmk4n4vz3fvd4pzjrhfemonu665rzgwpxeamm.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_4 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_3 = async_compile.triton('triton_poi_fused_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 144, 144), (82944, 20736, 144, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (512, 3136), (3136, 1)) assert_size_stride(primals_9, (512, ), (1, )) assert_size_stride(primals_10, (4, 512), (512, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 35, 35), (39200, 1225, 35, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 156800, grid=grid(156800), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 16, 16), (16384, 256, 16, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 65536, grid=grid(65536), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 14, 14), (12544, 196, 14, 1)) buf5 = buf4; del buf4 # reuse buf9 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf5, primals_7, buf9, 50176, grid=grid(50176), stream=stream0) del primals_7 buf6 = empty_strided_cuda((16, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (16, 3136), (3136, 1), 0), reinterpret_tensor(primals_8, (3136, 512), (1, 3136), 0), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] triton_poi_fused_relu_3.run(buf7, primals_9, 8192, grid=grid(8192), stream=stream0) del primals_9 buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf7, reinterpret_tensor(primals_10, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf8) del primals_11 return (buf8, primals_1, primals_3, primals_4, primals_6, buf1, buf3, reinterpret_tensor(buf5, (16, 3136), (3136, 1), 0), buf7, primals_10, primals_8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4, 8, 8), (256, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 144, 144), (82944, 20736, 144, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((512, 3136), (3136, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
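# ---------------------------------------------------------------------------
# Added note: the shape assertions in call() above follow from the standard
# valid-convolution output-size formula, floor((W - K) / S) + 1. The sketch
# below (illustrative helper names, not part of the generated module) checks
# the arithmetic for the (4, 4, 144, 144) inputs this module is traced with.


def _conv_out(size, kernel, stride):
    """Spatial output size of a convolution with no padding."""
    return (size - kernel) // stride + 1


def _check_dqn_shapes():
    s = _conv_out(144, 8, 4)  # conv_1: (144 - 8) // 4 + 1 = 35 -> buf0 (4, 32, 35, 35)
    assert s == 35
    s = _conv_out(s, 4, 2)    # conv_2: (35 - 4) // 2 + 1 = 16 -> buf2 (4, 64, 16, 16)
    assert s == 16
    s = _conv_out(s, 3, 1)    # conv_3: (16 - 3) // 1 + 1 = 14 -> buf4 (4, 64, 14, 14)
    assert s == 14
    # 4 * 64 * 14 * 14 = 50176 elements; reinterpret_tensor views them as
    # (16, 3136) so the mm against the (3136, 512) fc_1 weight is well-formed.
    assert 4 * 64 * s * s == 16 * 3136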
import torch
import torch.nn.functional as F
import torch.nn as nn


class DQN(nn.Module):
    """Initialize a deep Q-learning network

    Hints:
    -----
    Original paper for DQN
    https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf

    This is just a hint. You can build your own structure.
    """

    def __init__(self, gamma, ddqn=False, in_channels=4, num_actions=4):
        """
        Parameters:
        -----------
        in_channels: number of channels in the input, i.e. the number of most
            recent frames stacked together; here we use 4 frames, so each
            state in Breakout is composed of 4 frames.
        num_actions: number of action-values to output, in one-to-one
            correspondence with the actions in the game.

        You can add additional arguments as you need.
        In the constructor we instantiate modules and assign them as
        member variables.
        """
        super(DQN, self).__init__()
        self.gamma = gamma
        self.ddqn = ddqn
        self.in_channels = in_channels
        self.num_actions = num_actions
        self.pool = nn.MaxPool2d(2, 2)
        self.conv_1 = nn.Conv2d(self.in_channels, 32, kernel_size=8, stride=4)
        self.conv_2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv_3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.fc_1 = nn.Linear(64 * 7 * 7, 512)
        self.output_layer = nn.Linear(512, self.num_actions)

    def forward(self, x):
        """Execute a forward propagation step for the neural network.

        We accept a tensor of input data and must return a tensor of output
        data, using the modules defined in the constructor as well as
        arbitrary operators on tensors.

        Args:
            x: An observation in the form of a (4, 84, 84) tensor.
        """
        x = F.relu(self.conv_1(x))
        x = F.relu(self.conv_2(x))
        x = F.relu(self.conv_3(x))
        x = x.reshape(-1, 64 * 7 * 7)
        x = F.relu(self.fc_1(x))
        x = self.output_layer(x)
        return x

    def compute_loss(self, tensor_lst, target_network, criterion):
        """Computes the loss between the target Q-function and the current
        Q-function.

        Args:
            tensor_lst: A list of 5 tensors - current states, current actions,
                current rewards, terminal state booleans, and next states.

        Returns:
            Loss values in the form of a PyTorch tensor.
        """
        obs, act, rew, done, next_obs = tensor_lst
        with torch.no_grad():
            if self.ddqn:
                next_state_training_q_vals = self(next_obs)
                max_training_q_vals_ind = next_state_training_q_vals.argmax(
                    dim=1, keepdim=True)
                next_state_target_q_vals = target_network(next_obs)
                max_target_q_vals = torch.gather(
                    input=next_state_target_q_vals, dim=1,
                    index=max_training_q_vals_ind)
                target_q_vals = rew + self.gamma * (1 - done) * max_target_q_vals
            else:
                next_state_target_q_vals = target_network(next_obs)
                max_target_q_vals = next_state_target_q_vals.max(dim=1,
                    keepdim=True)[0]
                target_q_vals = rew + self.gamma * (1 - done) * max_target_q_vals
        q_vals = self(obs)
        actual_q_vals = torch.gather(input=q_vals, dim=1, index=act)
        return criterion(actual_q_vals, target_q_vals)


def get_inputs():
    return [torch.rand([4, 4, 144, 144])]


def get_init_inputs():
    return [[], {'gamma': 4}]
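# ---------------------------------------------------------------------------
# Added note: a minimal usage sketch built from the helpers above. With the
# shipped 144x144 test input, the feature map reaching the reshape is
# (4, 64, 14, 14), so x.reshape(-1, 64 * 7 * 7) produces 16 rows rather than
# 4, and the forward pass returns a (16, 4) tensor of Q-values.


def _dqn_smoke_test():
    init_args, init_kwargs = get_init_inputs()
    model = DQN(*init_args, **init_kwargs)  # gamma=4, defaults otherwise
    obs, = get_inputs()                     # (4, 4, 144, 144)
    q_values = model(obs)
    assert q_values.shape == (16, 4)
    return q_values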
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 156800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 1225 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 196 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 144, 144), (82944, 20736, 144, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (512, 3136), (3136, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (4, 512), (512, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
        assert_size_stride(buf0, (4, 32, 35, 35), (39200, 1225, 35, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(156800)](buf1, primals_2,
            156800, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 64, 16, 16), (16384, 256, 16, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_1[grid(65536)](buf3, primals_5,
            65536, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_5
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 64, 14, 14), (12544, 196, 14, 1))
        buf5 = buf4
        del buf4
        buf9 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_2[grid(50176)](
            buf5, primals_7, buf9, 50176, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_7
        buf6 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf5, (16, 3136), (3136, 1), 0),
            reinterpret_tensor(primals_8, (3136, 512), (1, 3136), 0),
            out=buf6)
        buf7 = buf6
        del buf6
        triton_poi_fused_relu_3[grid(8192)](buf7, primals_9, 8192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
        buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_11, buf7, reinterpret_tensor(
            primals_10, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf8)
        del primals_11
    return (buf8, primals_1, primals_3, primals_4, primals_6, buf1, buf3,
        reinterpret_tensor(buf5, (16, 3136), (3136, 1), 0), buf7,
        primals_10, primals_8, buf9)


class DQNNew(nn.Module):
    """Initialize a deep Q-learning network

    Hints:
    -----
    Original paper for DQN
    https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf

    This is just a hint. You can build your own structure.
    """

    def __init__(self, gamma, ddqn=False, in_channels=4, num_actions=4):
        """
        Parameters:
        -----------
        in_channels: number of channels in the input, i.e. the number of most
            recent frames stacked together; here we use 4 frames, so each
            state in Breakout is composed of 4 frames.
        num_actions: number of action-values to output, in one-to-one
            correspondence with the actions in the game.

        You can add additional arguments as you need.
        In the constructor we instantiate modules and assign them as
        member variables.
        """
        super(DQNNew, self).__init__()
        self.gamma = gamma
        self.ddqn = ddqn
        self.in_channels = in_channels
        self.num_actions = num_actions
        self.pool = nn.MaxPool2d(2, 2)
        self.conv_1 = nn.Conv2d(self.in_channels, 32, kernel_size=8, stride=4)
        self.conv_2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv_3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.fc_1 = nn.Linear(64 * 7 * 7, 512)
        self.output_layer = nn.Linear(512, self.num_actions)

    def compute_loss(self, tensor_lst, target_network, criterion):
        """Computes the loss between the target Q-function and the current
        Q-function.

        Args:
            tensor_lst: A list of 5 tensors - current states, current actions,
                current rewards, terminal state booleans, and next states.

        Returns:
            Loss values in the form of a PyTorch tensor.
        """
        obs, act, rew, done, next_obs = tensor_lst
        with torch.no_grad():
            if self.ddqn:
                next_state_training_q_vals = self(next_obs)
                max_training_q_vals_ind = next_state_training_q_vals.argmax(
                    dim=1, keepdim=True)
                next_state_target_q_vals = target_network(next_obs)
                max_target_q_vals = torch.gather(
                    input=next_state_target_q_vals, dim=1,
                    index=max_training_q_vals_ind)
                target_q_vals = rew + self.gamma * (1 - done) * max_target_q_vals
            else:
                next_state_target_q_vals = target_network(next_obs)
                max_target_q_vals = next_state_target_q_vals.max(dim=1,
                    keepdim=True)[0]
                target_q_vals = rew + self.gamma * (1 - done) * max_target_q_vals
        q_vals = self(obs)
        actual_q_vals = torch.gather(input=q_vals, dim=1, index=act)
        return criterion(actual_q_vals, target_q_vals)

    def forward(self, input_0):
        primals_1 = self.conv_1.weight
        primals_2 = self.conv_1.bias
        primals_4 = self.conv_2.weight
        primals_5 = self.conv_2.bias
        primals_6 = self.conv_3.weight
        primals_7 = self.conv_3.bias
        primals_8 = self.fc_1.weight
        primals_9 = self.fc_1.bias
        primals_10 = self.output_layer.weight
        primals_11 = self.output_layer.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
khaiyichin/DS595-RL-Projects
DQN
false
12,695
[ "MIT" ]
0
4add6b2adc2cb9f7cdb783d50b005ecd1b4aada3
https://github.com/khaiyichin/DS595-RL-Projects/tree/4add6b2adc2cb9f7cdb783d50b005ecd1b4aada3
BasicBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/fv/cfvfxpmpur3qlmurffwz4u56tgvw75i4lbjvzd25ortunbobyxnh.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten._native_batch_norm_legit, aten.relu] # Source node to ATen node mapping: # out_1 => add, rsqrt, var_mean # out_2 => relu # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) triton_per_fused__native_batch_norm_legit_relu_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_relu_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tl.full([1, 1], 0, tl.int32) tmp25 = triton_helpers.maximum(tmp24, tmp23) tl.store(out_ptr2 + (r1 + (16*x0)), tmp25, xmask) tl.store(out_ptr3 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/lk/clk53igo2wowtd6pq5zu23svybsi67ef5dmegrb3qnpketcev22z.py # Topologically Sorted Source Nodes: [out_4, out_6], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_4 => add_1, rsqrt_1, var_mean_1 # out_6 => relu_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_8,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_16, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = 0.0 tmp29 = tmp27 <= tmp28 tl.store(out_ptr2 + (r1 + (16*x0)), tmp27, xmask) tl.store(out_ptr3 + (r1 + (16*x0)), tmp29, xmask) tl.store(out_ptr4 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten._native_batch_norm_legit, aten.relu] stream0 = get_raw_stream(0) triton_per_fused__native_batch_norm_legit_relu_0.run(buf0, buf1, buf5, buf4, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf11 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out_4, out_6], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1.run(buf6, primals_1, buf7, buf11, buf12, buf10, 16, 16, grid=grid(16), stream=stream0) return (buf11, primals_1, primals_2, primals_3, buf0, reinterpret_tensor(buf4, (16, ), (1, ), 0), buf5, buf6, reinterpret_tensor(buf10, (16, ), (1, ), 0), buf12, reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
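# ---------------------------------------------------------------------------
# Added note: the two persistent-reduction kernels above fuse instance
# normalization with ReLU. Each program reduces one (n, c) plane of 16
# elements: mean, biased variance, rsqrt(var + 1e-05), normalize, then ReLU;
# the second kernel additionally adds the residual input (primals_1) before
# the ReLU and emits the <= 0 mask used by threshold_backward. A reference
# sketch of the same math in eager PyTorch (illustrative name, not part of
# the generated module):


def _instance_norm_relu_ref(x, identity=None, eps=1e-05):
    # x: (N, C, H, W); statistics per (n, c) plane, biased variance,
    # matching the 16-element reductions in the kernels above.
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    out = (x - mean) * torch.rsqrt(var + eps)
    if identity is not None:
        out = out + identity
    return torch.relu(out)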
import torch import torch.nn as nn import torch.utils.data def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) class BasicBlock(nn.Module): expansion = 1 __constants__ = ['downsample'] def __init__(self, inplanes, planes, stride=1, downsample=None, groups= 1, base_width=64, dilation=1, norm_model='instance'): super(BasicBlock, self).__init__() if 'instance' == norm_model: norm_layer = nn.InstanceNorm2d else: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise ValueError( 'BasicBlock only supports groups=1 and base_width=64') if dilation > 1: raise NotImplementedError( 'Dilation > 1 not supported in BasicBlock') self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.stride = stride if stride != 1 or inplanes != planes: self.downsample = nn.Sequential(conv1x1(inplanes, planes, stride), norm_layer(planes)) else: self.downsample = downsample def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
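# ---------------------------------------------------------------------------
# Added note: a minimal usage sketch built from the helpers above. With
# inplanes == planes == 4 and stride == 1, the constructor keeps the passed
# downsample (None here), so the identity branch adds the raw input back
# before the final ReLU.


def _basic_block_smoke_test():
    init_args, init_kwargs = get_init_inputs()
    block = BasicBlock(*init_args, **init_kwargs)  # inplanes=4, planes=4
    x, = get_inputs()                              # (4, 4, 4, 4)
    out = block(x)
    assert out.shape == x.shape
    return out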
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_relu_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tl.full([1, 1], 0, tl.int32) tmp25 = triton_helpers.maximum(tmp24, tmp23) tl.store(out_ptr2 + (r1 + 16 * x0), tmp25, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0 , in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = 0.0 tmp29 = tmp27 <= tmp28 tl.store(out_ptr2 + (r1 + 16 * x0), tmp27, xmask) tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_relu_0[grid(16)](buf0, buf1, buf5, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1[ grid(16)](buf6, primals_1, buf7, buf11, buf12, buf10, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) return buf11, primals_1, primals_2, primals_3, buf0, reinterpret_tensor( buf4, (16,), (1,), 0), buf5, buf6, reinterpret_tensor(buf10, (16,), (1,), 0), buf12, reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0) def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) class BasicBlockNew(nn.Module): expansion = 1 __constants__ = ['downsample'] def __init__(self, inplanes, planes, stride=1, downsample=None, groups= 1, base_width=64, dilation=1, norm_model='instance'): super(BasicBlockNew, self).__init__() if 'instance' == norm_model: norm_layer = nn.InstanceNorm2d else: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise ValueError( 'BasicBlock only supports groups=1 and base_width=64') if dilation > 1: raise NotImplementedError( 'Dilation > 1 not supported in BasicBlock') self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.stride = stride if stride != 1 or inplanes != planes: self.downsample = nn.Sequential(conv1x1(inplanes, planes, stride), norm_layer(planes)) else: self.downsample = downsample def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
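# ---------------------------------------------------------------------------
# Added note: a hedged check that BasicBlockNew reproduces the eager
# BasicBlock (defined in the companion module above; the import path is
# hypothetical). Copying the two conv weights is the only state to sync:
# nn.InstanceNorm2d defaults to affine=False, so the norms carry no
# learnable parameters.


def _compare_basic_block_outputs(atol=1e-4):
    from basic_block_eager import BasicBlock  # hypothetical module name
    torch.manual_seed(0)
    eager = BasicBlock(inplanes=4, planes=4).cuda()
    compiled = BasicBlockNew(inplanes=4, planes=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        ref = eager(x)
    out = compiled(x)
    assert torch.allclose(ref, out, atol=atol)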
kudoNCT/michigan_copy
BasicBlock
false
12,696
[ "MIT" ]
0
e857b96a65b270ef2506cb9866b7e01f117c4396
https://github.com/kudoNCT/michigan_copy/tree/e857b96a65b270ef2506cb9866b7e01f117c4396
FeaturePyramidNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/fp/cfp5jrxxyxrvhcpoq5tio3p5tkhj5ugdrpyur3x4v6meatzih7jn.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/7u/c7uvmjdpe24ml4xacimpqeftnz5qbzwwxfcng6bdrqmwamnyk7ss.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (64*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (4*x2) + (256*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/a5/ca5rooc2sujjgcaib3xl3m5z2la47xrbulj6xmbbyvv4xwpw3s6w.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 256], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (4*x2) + (1024*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rw/crwxihz2xdn6vknnrjr5if7hyms65a7dv6ub7vsls72ck5xfuwfz.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/qg/cqgn2jt4v5tmejshiwcthgkyxdgowdb3qkqsaksyfh4thcwnz6hg.py # Topologically Sorted Source Nodes: [P5_upsampled_x], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to 
ATen node mapping: # P5_upsampled_x => add, add_1, convert_element_type, convert_element_type_1, iota, mul, mul_1 # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {}) # %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_4 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/oi/coik5p6vp7gpgli2c742eorprieg3ksiwhta5jh2rgrjqcmzhvdb.py # Topologically Sorted Source Nodes: [P4_upsampled_x], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # P4_upsampled_x => add_5, add_6, convert_element_type_4, convert_element_type_5, iota_2, mul_4, mul_5 # Graph fragment: # %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul_4 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_5, torch.float32), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 0.5), kwargs = {}) # %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_5 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_5(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/2d/c2dwrwxe3kh7pkzyoat4x7ctodoe4q4o5pydqbfgjk276hr4ckom.py # Topologically Sorted Source Nodes: [P5_x, P5_upsampled_x, P4_x, P4_x_1, P4_upsampled_x, P3_x, P3_x_1], Original ATen: [aten.convolution, aten._unsafe_index, aten.add] # Source node to ATen node mapping: # P3_x => convolution_4 # P3_x_1 => add_9 # P4_upsampled_x => _unsafe_index_1 # P4_x => convolution_2 # P4_x_1 => add_4 # P5_upsampled_x => _unsafe_index # P5_x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) # %convolution_2 : [num_users=1] = 
call_function[target=torch.ops.aten.convolution.default](args = (%primals_8, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %convolution_2), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_4, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {}) # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_13, %primals_11, %primals_12, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_4, %_unsafe_index_1), kwargs = {}) triton_poi_fused__unsafe_index_add_convolution_6 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*i64', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 256 x2 = (xindex // 4096) % 16 x1 = (xindex // 256) % 16 x3 = (xindex // 65536) tmp0 = tl.load(in_out_ptr0 + (x4), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tl.full([XBLOCK], 8, tl.int32) tmp5 = tmp3 + tmp4 tmp6 = tmp3 < 0 tmp7 = tl.where(tmp6, tmp5, tmp3) tmp9 = tmp8 + tmp4 tmp10 = tmp8 < 0 tmp11 = tl.where(tmp10, tmp9, tmp8) tmp12 = tl.load(in_ptr2 + (tmp7), None, eviction_policy='evict_last') tmp13 = tl.full([XBLOCK], 4, tl.int32) tmp14 = tmp12 + tmp13 tmp15 = 
tmp12 < 0 tmp16 = tl.where(tmp15, tmp14, tmp12) tmp17 = tl.load(in_ptr2 + (tmp11), None, eviction_policy='evict_last') tmp18 = tmp17 + tmp13 tmp19 = tmp17 < 0 tmp20 = tl.where(tmp19, tmp18, tmp17) tmp21 = tl.load(in_ptr3 + (x0 + (256*tmp20) + (1024*tmp16) + (4096*x3)), None) tmp23 = tmp21 + tmp22 tmp24 = tl.load(in_ptr5 + (x0 + (256*tmp11) + (2048*tmp7) + (16384*x3)), None) tmp26 = tmp24 + tmp25 tmp27 = tmp23 + tmp26 tmp28 = tmp2 + tmp27 tl.store(in_out_ptr0 + (x4), tmp28, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/tz/ctzskgnn2nzm5hhtqatyoshow3yewob26uy34ey7cz35h72rrkbc.py # Topologically Sorted Source Nodes: [P3_x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # P3_x_2 => convolution_5 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_9, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 256], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_7(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1024 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = (yindex // 256) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (65536*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (256*y3)), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args args.clear() assert_size_stride(primals_1, (256, 4, 1, 1), (4, 1, 1, 1)) 
assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256, ), (1, )) assert_size_stride(primals_6, (256, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_10, (256, ), (1, )) assert_size_stride(primals_11, (256, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_12, (256, ), (1, )) assert_size_stride(primals_13, (4, 4, 16, 16), (1024, 256, 16, 1)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_3, buf0, 16, 16, grid=grid(16, 16), stream=stream0) del primals_3 buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 1, 32, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_8, buf1, 16, 64, grid=grid(16, 64), stream=stream0) del primals_8 buf2 = empty_strided_cuda((4, 4, 16, 16), (1024, 1, 64, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_13, buf2, 16, 256, grid=grid(16, 256), stream=stream0) del primals_13 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_14, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_14 # Topologically Sorted Source Nodes: [P5_x], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf5 = empty_strided_cuda((8, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [P5_upsampled_x], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_4.run(buf5, 8, grid=grid(8), stream=stream0) # Topologically Sorted Source Nodes: [P4_x], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf7 = empty_strided_cuda((16, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [P4_upsampled_x], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_5.run(buf7, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [P3_x], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf2, primals_11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [P5_x, P5_upsampled_x, P4_x, P4_x_1, P4_upsampled_x, P3_x, P3_x_1], Original ATen: [aten.convolution, aten._unsafe_index, aten.add] triton_poi_fused__unsafe_index_add_convolution_6.run(buf9, primals_12, buf7, buf5, buf4, primals_2, buf6, primals_7, 262144, grid=grid(262144), stream=stream0) del buf4 
del buf6 del primals_12 del primals_2 del primals_7 # Topologically Sorted Source Nodes: [P3_x_2], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf11 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [P3_x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf10, primals_15, buf11, 1024, 256, grid=grid(1024, 256), stream=stream0) del buf10 del primals_15 return (buf11, primals_1, buf0, primals_6, buf1, primals_11, buf2, buf3, buf5, buf7, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 8, 8), (256, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4, 16, 16), (1024, 256, 16, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class FeaturePyramidNetwork(nn.Module): def __init__(self, C3_feature, C4_feature, C5_feature, feature_size=256): super(FeaturePyramidNetwork, self).__init__() self.P5_1 = nn.Conv2d(C5_feature, feature_size, kernel_size=1, stride=1, padding=0) self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest') self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1) self.P4_1 = nn.Conv2d(C4_feature, feature_size, kernel_size=1, stride=1, padding=0) self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest') self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1) self.P3_1 = nn.Conv2d(C3_feature, feature_size, kernel_size=1, stride=1, padding=0) self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1) def forward(self, C3, C4, C5): P5_x = self.P5_1(C5) P5_upsampled_x = self.P5_upsampled(P5_x) P5_x = self.P5_2(P5_x) P4_x = self.P4_1(C4) P4_x = P5_upsampled_x + P4_x P4_upsampled_x = self.P4_upsampled(P4_x) P4_x = self.P4_2(P4_x) P3_x = self.P3_1(C3) P3_x = P3_x + P4_upsampled_x P3_x = self.P3_2(P3_x) return P3_x def get_inputs(): return [torch.rand([4, 4, 16, 16]), torch.rand([4, 4, 8, 8]), torch. rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'C3_feature': 4, 'C4_feature': 4, 'C5_feature': 4}]
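A minimal smoke test of the eager module above, shown for orientation (the shapes follow get_inputs/get_init_inputs; the variable names and the assertion are illustrative assumptions, not part of the dataset record). The top-down pathway upsamples P5 into P4 and P4 into P3, so the fused output keeps the finest (C3) resolution with feature_size channels.

import torch

fpn = FeaturePyramidNetwork(C3_feature=4, C4_feature=4, C5_feature=4)
C3 = torch.rand(4, 4, 16, 16)  # finest level
C4 = torch.rand(4, 4, 8, 8)
C5 = torch.rand(4, 4, 4, 4)    # coarsest level
P3 = fpn(C3, C4, C5)
assert P3.shape == (4, 256, 16, 16)  # feature_size=256 channels at C3 resolution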
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 64 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 256 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 1024 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_5(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 256 x2 = xindex // 4096 % 16 x1 = xindex // 256 % 16 x3 = xindex // 65536 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tl.full([XBLOCK], 8, tl.int32) tmp5 = tmp3 + tmp4 tmp6 = tmp3 < 0 tmp7 = tl.where(tmp6, tmp5, tmp3) tmp9 = tmp8 + tmp4 tmp10 = tmp8 < 0 tmp11 = tl.where(tmp10, tmp9, tmp8) tmp12 = tl.load(in_ptr2 + tmp7, None, eviction_policy='evict_last') tmp13 = tl.full([XBLOCK], 4, tl.int32) tmp14 = tmp12 + tmp13 tmp15 = tmp12 < 0 tmp16 = tl.where(tmp15, tmp14, tmp12) tmp17 = tl.load(in_ptr2 + tmp11, None, eviction_policy='evict_last') tmp18 = tmp17 + tmp13 tmp19 = tmp17 < 0 tmp20 = tl.where(tmp19, tmp18, tmp17) tmp21 = tl.load(in_ptr3 + (x0 + 256 * tmp20 + 1024 * tmp16 + 4096 * x3), None) tmp23 = tmp21 + tmp22 tmp24 = tl.load(in_ptr5 + (x0 + 256 * tmp11 + 2048 * tmp7 + 16384 * x3), None) tmp26 = tmp24 + tmp25 tmp27 = tmp23 + tmp26 tmp28 = tmp2 + tmp27 tl.store(in_out_ptr0 + x4, tmp28, None) @triton.jit def triton_poi_fused_convolution_7(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 65536 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 256 * y3), tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, 
primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (256, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_10, (256,), (1,)) assert_size_stride(primals_11, (256, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_12, (256,), (1,)) assert_size_stride(primals_13, (4, 4, 16, 16), (1024, 256, 16, 1)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 16)](primals_3, buf0, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 1, 32, 4), torch.float32) triton_poi_fused_1[grid(16, 64)](primals_8, buf1, 16, 64, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_8 buf2 = empty_strided_cuda((4, 4, 16, 16), (1024, 1, 64, 4), torch. float32) triton_poi_fused_2[grid(16, 256)](primals_13, buf2, 16, 256, XBLOCK =64, YBLOCK=16, num_warps=4, num_stages=1) del primals_13 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_3[grid(65536, 9)](primals_14, buf3, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf4 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf5 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_4[grid(8)](buf5, 8, XBLOCK =8, num_warps=1, num_stages=1) buf6 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf7 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_5[grid(16)](buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = extern_kernels.convolution(buf2, primals_11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf9 = buf8 del buf8 triton_poi_fused__unsafe_index_add_convolution_6[grid(262144)](buf9, primals_12, buf7, buf5, buf4, primals_2, buf6, primals_7, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf4 del buf6 del primals_12 del primals_2 del primals_7 buf10 = extern_kernels.convolution(buf9, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf11 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) triton_poi_fused_convolution_7[grid(1024, 256)](buf10, primals_15, buf11, 1024, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf10 del primals_15 return (buf11, 
primals_1, buf0, primals_6, buf1, primals_11, buf2, buf3, buf5, buf7, buf9) class FeaturePyramidNetworkNew(nn.Module): def __init__(self, C3_feature, C4_feature, C5_feature, feature_size=256): super(FeaturePyramidNetworkNew, self).__init__() self.P5_1 = nn.Conv2d(C5_feature, feature_size, kernel_size=1, stride=1, padding=0) self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest') self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1) self.P4_1 = nn.Conv2d(C4_feature, feature_size, kernel_size=1, stride=1, padding=0) self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest') self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1) self.P3_1 = nn.Conv2d(C3_feature, feature_size, kernel_size=1, stride=1, padding=0) self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1) def forward(self, input_0, input_1, input_2): primals_1 = self.P5_1.weight primals_2 = self.P5_1.bias primals_4 = self.P5_2.weight primals_5 = self.P5_2.bias primals_6 = self.P4_1.weight primals_7 = self.P4_1.bias primals_9 = self.P4_2.weight primals_10 = self.P4_2.bias primals_11 = self.P3_1.weight primals_12 = self.P3_1.bias primals_14 = self.P3_2.weight primals_15 = self.P3_2.bias primals_13 = input_0 primals_8 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
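One detail of the compiled code above worth noting: the two arange/mul kernels precompute nearest-neighbor gather indices as floor(i * 0.5), which is exactly the index map that mode='nearest' upsampling with scale_factor=2 needs. A small sketch of that equivalence in plain PyTorch (the tensor shapes are assumptions for illustration):

import torch

i = torch.arange(8)
src = (i.float() * 0.5).to(torch.int64)  # what buf5 holds: [0, 0, 1, 1, 2, 2, 3, 3]
x = torch.rand(1, 1, 4, 4)
up = torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')
# gather rows then columns with the precomputed indices
assert torch.equal(up, x[:, :, src][:, :, :, src])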
kiyohiro8/SemanticReasoningNetworks
FeaturePyramidNetwork
false
12697
[ "MIT" ]
0
9dc20706a2234511789a7a2fa07cc3b77c64bf81
https://github.com/kiyohiro8/SemanticReasoningNetworks/tree/9dc20706a2234511789a7a2fa07cc3b77c64bf81
_Hswish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/jj/cjjcpa4jfom3kmx4ufnxtda3bmq466cpemkegyhzep2ymmlsg35l.py # Topologically Sorted Source Nodes: [add, hardtanh, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div] # Source node to ATen node mapping: # add => add # hardtanh => clamp_max, clamp_min # mul => mul # truediv => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3.0), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %clamp_max), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6.0), kwargs = {}) triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = 0.16666666666666666 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, hardtanh, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class _Hswish(nn.Module): def __init__(self, inplace=True): super(_Hswish, self).__init__() self.relu6 = nn.ReLU6(inplace) def forward(self, x): return x * self.relu6(x + 3.0) / 6.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
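For reference, x * relu6(x + 3) / 6 is the standard hard-swish, so the module above should agree with PyTorch's built-in torch.nn.functional.hardswish. A quick numerical check (illustrative, not part of the record; the tolerance is an assumption):

import torch
import torch.nn.functional as F

x = torch.linspace(-6.0, 6.0, steps=25)
m = _Hswish(inplace=False)
assert torch.allclose(m(x), F.hardswish(x), atol=1e-6)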
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = 0.16666666666666666 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class _HswishNew(nn.Module): def __init__(self, inplace=True): super(_HswishNew, self).__init__() self.relu6 = nn.ReLU6(inplace) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
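A hedged way to exercise the generated wrapper above (this assumes a CUDA device and a tolerance, neither of which the record specifies): call() consumes and clears the argument list it is given and returns a one-element tuple, so it can be compared against the eager module directly.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = _Hswish(inplace=False)(x)
compiled, = call([x.clone()])  # call() empties the list passed in
assert torch.allclose(eager, compiled, atol=1e-6)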
hzwangjl/Lightweight-Segmentation
_Hswish
false
12698
[ "Apache-2.0" ]
0
3a476719bdfee653ac1e1617c22714b7ee932cef
https://github.com/hzwangjl/Lightweight-Segmentation/tree/3a476719bdfee653ac1e1617c22714b7ee932cef
AR
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ob/cobtm4z6tqthirp5npt5j3cxa4jheh2rs6tj46lbtaa3xmkk3spe.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add] # Source node to ATen node mapping: # x_1 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 1, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing 
import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn from typing import * class AR(nn.Module): def __init__(self, window): super(AR, self).__init__() self.linear = nn.Linear(window, 1) def forward(self, x): x = torch.transpose(x, 1, 2) x = self.linear(x) x = torch.transpose(x, 1, 2) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'window': 4}]
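A small shape sketch (the 3-D shapes below are illustrative assumptions): AR learns a single Linear(window, 1) shared across channels, so after the two transposes an input of (batch, window, channels) collapses to (batch, 1, channels), i.e. a per-channel autoregressive readout over the window.

import torch

ar = AR(window=4)
x = torch.rand(8, 4, 3)  # (batch, window, channels)
y = ar(x)
assert y.shape == (8, 1, 3)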
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn from typing import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_add_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 1, 4, 1), 0 ), reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class ARNew(nn.Module): def __init__(self, window): super(ARNew, self).__init__() self.linear = nn.Linear(window, 1) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
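The clone kernel above materializes torch.transpose(x, 1, 2) into contiguous memory before the matmul. Its gather arithmetic can be checked in plain PyTorch (a sketch, with shapes fixed to the record's (4, 4, 4, 4) case):

import torch

x = torch.rand(4, 4, 4, 4)
ref = x.transpose(1, 2).contiguous()
i = torch.arange(256)
# decompose the flat output index the way triton_poi_fused_clone_0 does
x0, x1, x2, x3 = i % 4, i // 4 % 4, i // 16 % 4, i // 64
out = x.reshape(-1)[x0 + 4 * x2 + 16 * x1 + 64 * x3].reshape(4, 4, 4, 4)
assert torch.equal(out, ref)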
kuleshov/multivariate-deep-learning
AR
false
12699
[ "MIT" ]
0
c87bf321a13fdb44c22decf6f685296b8f637a67
https://github.com/kuleshov/multivariate-deep-learning/tree/c87bf321a13fdb44c22decf6f685296b8f637a67
SoftNLL
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ug/cugeeyor3h3pksbthqmiju2ox34arhybyzvkkikhf3du4yulpajg.py # Topologically Sorted Source Nodes: [log, mul, sum_1, mean, neg], Original ATen: [aten.log, aten.mul, aten.sum, aten.mean, aten.neg] # Source node to ATen node mapping: # log => log # mean => mean # mul => mul # neg => neg # sum_1 => sum_1 # Graph fragment: # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, %arg1_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) triton_per_fused_log_mean_mul_neg_sum_0 = async_compile.triton('triton_per_fused_log_mean_mul_neg_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log_mean_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_log_mean_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp2 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp6 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp11 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp16 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp1 = tl_math.log(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tl_math.log(tmp4) tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tmp10 = tl_math.log(tmp9) tmp12 = tmp10 * tmp11 tmp13 = tmp8 + tmp12 tmp15 = tl_math.log(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = tl.sum(tmp19, 1)[:, None] tmp22 = 64.0 tmp23 = tmp21 / tmp22 tmp24 = -tmp23 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp24, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [log, mul, sum_1, mean, neg], Original ATen: [aten.log, aten.mul, aten.sum, aten.mean, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_log_mean_mul_neg_sum_0.run(buf1, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SoftNLL(nn.Module): def __init__(self): """The 'soft' version of negative_log_likelihood, where y is a distribution over classes rather than a one-hot encoding """ super(SoftNLL, self).__init__() def forward(self, input, target): return -torch.mean(torch.sum(torch.log(input) * target, dim=1)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
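One property worth recording (a sketch under assumed shapes): when the target distribution is one-hot, this soft loss reduces to the ordinary negative log-likelihood, since torch.sum(torch.log(input) * target, dim=1) picks out log p at the true class.

import torch
import torch.nn.functional as F

probs = torch.softmax(torch.randn(5, 3), dim=1)  # (batch, classes)
labels = torch.randint(0, 3, (5,))
one_hot = F.one_hot(labels, num_classes=3).float()
loss = SoftNLL()(probs, one_hot)
ref = F.nll_loss(torch.log(probs), labels)  # mean of -log p at the true class
assert torch.allclose(loss, ref, atol=1e-6)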
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_log_mean_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp6 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp11 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp16 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp1 = tl_math.log(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tl_math.log(tmp4) tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tmp10 = tl_math.log(tmp9) tmp12 = tmp10 * tmp11 tmp13 = tmp8 + tmp12 tmp15 = tl_math.log(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = tl.sum(tmp19, 1)[:, None] tmp22 = 64.0 tmp23 = tmp21 / tmp22 tmp24 = -tmp23 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_log_mean_mul_neg_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SoftNLLNew(nn.Module): def __init__(self): """The 'soft' version of negative_log_likelihood, where y is a distribution over classes rather than a one-hot encoding """ super(SoftNLLNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
lehgtrung/gcn-over-pruned-trees
SoftNLL
false
12700
[ "Apache-2.0" ]
0
ebf0de0948883009a9bebb8ff336e8d6fe50a26f
https://github.com/lehgtrung/gcn-over-pruned-trees/tree/ebf0de0948883009a9bebb8ff336e8d6fe50a26f
GlobalMaxPool1d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/l7/cl7te2znkphosjjgl5c32tkhoiehr4feuczsseen53wz7j4ifolb.py # Topologically Sorted Source Nodes: [max_pool1d], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # max_pool1d => _low_memory_max_pool2d_with_offsets # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 4], [1, 4], [0, 0], [1, 1], False), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [max_pool1d], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class GlobalMaxPool1d(nn.Module): """Performs global max pooling over the entire length of a batched 1D tensor # Arguments input: Input tensor """ def forward(self, input): return nn.functional.max_pool1d(input, kernel_size=input.size()[2:] ).view(-1, input.size(1)) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
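The eager module reduces to a plain max over dim 2: max_pool1d with the kernel spanning the whole length keeps one value per channel, and the view drops the trailing singleton. A small sketch checking that equivalence on CPU:

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4)
pooled = nn.functional.max_pool1d(x, kernel_size=x.size(2)).view(-1, x.size(1))
assert pooled.shape == (4, 4)
assert torch.equal(pooled, x.max(dim=2).values)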
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class GlobalMaxPool1dNew(nn.Module): """Performs global max pooling over the entire length of a batched 1D tensor # Arguments input: Input tensor """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
liaoweiduo/few-shot
GlobalMaxPool1d
false
12,701
[ "MIT" ]
0
24d54fa3b472194b8cdab0ec6017bc5f649380a0
https://github.com/liaoweiduo/few-shot/tree/24d54fa3b472194b8cdab0ec6017bc5f649380a0
LearnedPositionalEmbedding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/5i/c5iybmnijeaxq3pumkl5crtkns462pwdrh72bxy4lcvnlh3r4364.py # Topologically Sorted Source Nodes: [ne, mask, cumsum], Original ATen: [aten.ne, aten._to_copy, aten.cumsum] # Source node to ATen node mapping: # cumsum => cumsum # mask => convert_element_type # ne => ne # Graph fragment: # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%primals_1, 4), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int32), kwargs = {}) # %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%convert_element_type, 1), kwargs = {}) triton_per_fused__to_copy_cumsum_ne_0 = async_compile.triton('triton_per_fused__to_copy_cumsum_ne_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton_heuristics.persistent_reduction( size_hints=[64, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_cumsum_ne_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0) tmp1 = 4.0 tmp2 = tmp0 != tmp1 tmp3 = tmp2.to(tl.int32) tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.int64) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ft/cftxaavy7b7scxgnrhfsvfnicvimxnf3kpckow5nzkyed3meyoli.py # Topologically Sorted Source Nodes: [ne, mask, type_as, mul, long, positions], Original ATen: [aten.ne, aten._to_copy, aten.mul, aten.add] # Source node to ATen node mapping: # long => convert_element_type_2 # mask => convert_element_type # mul => mul # ne => ne # positions => add # type_as => convert_element_type_1 # Graph fragment: # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%primals_1, 4), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int32), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%cumsum, torch.int32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %convert_element_type), kwargs = {}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul, torch.int64), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 4), kwargs = {}) triton_poi_fused__to_copy_add_mul_ne_1 = async_compile.triton('triton_poi_fused__to_copy_add_mul_ne_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_mul_ne_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0.to(tl.int32) tmp3 = 4.0 tmp4 = tmp2 != tmp3 tmp5 = tmp4.to(tl.int32) tmp6 = tmp1 * tmp5 tmp7 = tmp6.to(tl.int64) tmp8 = tl.full([1], 4, tl.int64) tmp9 = tmp7 + tmp8 tl.store(in_out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/in/cinglqnf6mtochspmiolvr3bqay6yiivzgqlihpkdlbd5p4ccw54.py # Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding] # Source node to ATen node mapping: # embedding => embedding # Graph fragment: # %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %add, 4), kwargs = {}) triton_poi_fused_embedding_2 = async_compile.triton('triton_poi_fused_embedding_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 9, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 9)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 9") tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (9, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) # Topologically Sorted 
Source Nodes: [ne, mask, cumsum], Original ATen: [aten.ne, aten._to_copy, aten.cumsum] stream0 = get_raw_stream(0) triton_per_fused__to_copy_cumsum_ne_0.run(primals_1, buf0, 64, 4, grid=grid(64), stream=stream0) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [ne, mask, type_as, mul, long, positions], Original ATen: [aten.ne, aten._to_copy, aten.mul, aten.add] triton_poi_fused__to_copy_add_mul_ne_1.run(buf1, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding] triton_poi_fused_embedding_2.run(buf1, primals_2, buf2, 1024, grid=grid(1024), stream=stream0) del primals_2 return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((9, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class LearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. """ def __init__(self, num_embeddings: 'int', embedding_dim: 'int', padding_idx: 'int'): if padding_idx is not None: num_embeddings_ = num_embeddings + padding_idx + 1 else: num_embeddings_ = num_embeddings super().__init__(num_embeddings_, embedding_dim, padding_idx) self.max_positions = num_embeddings def forward(self, input: 'torch.Tensor'): """Input is expected to be of size [bsz x seqlen].""" if input.size(1) > self.max_positions: raise ValueError( f'Sequence length {input.size(1)} above maximum sequence length of {self.max_positions}' ) mask = input.ne(self.padding_idx).int() positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long( ) + self.padding_idx return F.embedding(positions, self.weight, self.padding_idx, self. max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_embeddings': 4, 'embedding_dim': 4, 'padding_idx': 4}]
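The position arithmetic is easiest to see on a tiny example: entries equal to padding_idx keep position padding_idx, while real tokens are numbered from padding_idx + 1 onward. A minimal sketch with padding_idx = 4 (the token values here are illustrative):

import torch

pad = 4
tokens = torch.tensor([[7, 7, 4, 7]])
mask = tokens.ne(pad).int()  # [[1, 1, 0, 1]]
positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + pad
assert positions.tolist() == [[5, 6, 4, 7]]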
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0) tmp1 = 4.0 tmp2 = tmp0 != tmp1 tmp3 = tmp2.to(tl.int32) tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.int64) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp7, xmask) @triton.jit def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.int32) tmp3 = 4.0 tmp4 = tmp2 != tmp3 tmp5 = tmp4.to(tl.int32) tmp6 = tmp1 * tmp5 tmp7 = tmp6.to(tl.int64) tmp8 = tl.full([1], 4, tl.int64) tmp9 = tmp7 + tmp8 tl.store(in_out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 9, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 9) | ~xmask, 'index out of bounds: 0 <= tmp4 < 9') tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (9, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) get_raw_stream(0) triton_per_fused__to_copy_cumsum_ne_0[grid(64)](primals_1, buf0, 64, 4, XBLOCK=32, num_warps=2, num_stages=1) buf1 = buf0 del buf0 triton_poi_fused__to_copy_add_mul_ne_1[grid(256)](buf1, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_embedding_2[grid(1024)](buf1, primals_2, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, buf1 class LearnedPositionalEmbeddingNew(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. 
""" def __init__(self, num_embeddings: 'int', embedding_dim: 'int', padding_idx: 'int'): if padding_idx is not None: num_embeddings_ = num_embeddings + padding_idx + 1 else: num_embeddings_ = num_embeddings super().__init__(num_embeddings_, embedding_dim, padding_idx) self.max_positions = num_embeddings def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
leeharry92/esm
LearnedPositionalEmbedding
false
12,702
[ "MIT" ]
0
7d0feccf03ebbdeba4e7ba0f21d934099a0223ce
https://github.com/leeharry92/esm/tree/7d0feccf03ebbdeba4e7ba0f21d934099a0223ce
MSELoss2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 
= xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/m4/cm4xtb3bnuq3knchbeowql4btzwa7fgl5fl2ojphcrhtylwyafoa.py # Topologically Sorted Source Nodes: [softmax, loss], Original ATen: [aten._softmax, aten.mse_loss] # Source node to ATen node mapping: # loss => mean, pow_1, sub_1 # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) triton_per_fused__softmax_mse_loss_1 = async_compile.triton('triton_per_fused__softmax_mse_loss_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_mse_loss_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_mse_loss_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 
= tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (r3), None) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [softmax, loss], Original ATen: [aten._softmax, aten.mse_loss] triton_per_fused__softmax_mse_loss_1.run(buf2, buf0, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg1_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class MSELoss2d(nn.Module): def __init__(self, size_average=None, reduce=None, reduction='mean', ignore_index=255): super(MSELoss2d, self).__init__() self.MSE = nn.MSELoss(size_average=size_average, reduce=reduce, reduction=reduction) def forward(self, output, target): loss = self.MSE(torch.softmax(output, dim=1), target) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
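In other words, the loss is a channel-wise softmax followed by a mean-reduced squared error, which is exactly what the two fused kernels above compute. A minimal CPU sketch:

import torch

output = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
loss = MSELoss2d()(output, target)
expected = ((torch.softmax(output, dim=1) - target) ** 2).mean()
assert torch.allclose(loss, expected)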
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_per_fused__softmax_mse_loss_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + r3, None) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__softmax_mse_loss_1[grid(1)](buf2, buf0, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf2, class MSELoss2dNew(nn.Module): def __init__(self, size_average=None, reduce=None, reduction='mean', ignore_index=255): super(MSELoss2dNew, self).__init__() self.MSE = nn.MSELoss(size_average=size_average, reduce=reduce, reduction=reduction) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
leo-hao/DACS
MSELoss2d
false
12,703
[ "MIT" ]
0
9fe9bc077a9a0e0fd2b118bfc2d522c2b6fb624e
https://github.com/leo-hao/DACS/tree/9fe9bc077a9a0e0fd2b118bfc2d522c2b6fb624e
ShiftedConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/bi/cbirdtqu75ekpesssmflsmp3pefzvdh6ju6mdlels6qajyoqabvh.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%full_default, %permute], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x1 = (xindex // 7) % 4 x2 = (xindex // 28) x3 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 0.0 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = 
tl.where(tmp4, tmp5, tmp6) tmp8 = tmp0 >= tmp3 tmp9 = tl.full([1], 7, tl.int64) tmp10 = tmp0 < tmp9 tmp11 = tl.load(in_ptr0 + (x1 + (4*((-3) + x0)) + (16*x2)), tmp8 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp4, tmp7, tmp11) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/2v/c2vq3pqmx7c3oxfp7p5dhdjlsuc23intbs2hzh7vg3guwwm2uicn.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.mul] # Source node to ATen node mapping: # x_2 => convolution # x_3 => mul # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.3535533905932738), kwargs = {}) triton_poi_fused_convolution_mul_1 = async_compile.triton('triton_poi_fused_convolution_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.3535533905932738 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, buf0, 112, grid=grid(112), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_2], Original ATen: 
[aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.mul] triton_poi_fused_convolution_mul_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn
from numpy import prod


def getLayerNormalizationFactor(x):
    """
    Get He's constant for the given layer
    https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
    """
    size = x.weight.size()
    fan_in = prod(size[1:])
    return math.sqrt(2.0 / fan_in)


class ConstrainedLayer(nn.Module):
    """
    A handy refactor that allows the user to:
    - initialize one layer's bias to zero
    - apply He's initialization at runtime
    """

    def __init__(self, module, equalized=True, lrMul=1.0, initBiasToZero=True):
        """
        equalized (bool): if true, the layer's weight should evolve within
                          the range (-1, 1)
        initBiasToZero (bool): if true, bias will be initialized to zero
        """
        super(ConstrainedLayer, self).__init__()
        self.module = module
        self.equalized = equalized
        if initBiasToZero and module.bias is not None:
            self.module.bias.data.fill_(0)
        if self.equalized:
            self.module.weight.data.normal_(0, 1)
            self.weight = getLayerNormalizationFactor(self.module) * lrMul

    def forward(self, x):
        x = self.module(x)
        if self.equalized:
            x *= self.weight
        return x


class EqualizedConv1d(ConstrainedLayer):

    def __init__(self, nChannelsPrevious, nChannels, kernelSize, padding=0,
        bias=True, stride=1, **kwargs):
        """
        An nn.Conv1d module with specific constraints
        Args:
            nChannelsPrevious (int): number of channels in the previous layer
            nChannels (int): number of channels of the current layer
            kernelSize (int): size of the convolutional kernel
            padding (int): convolution's padding
            bias (bool): with bias ?
        """
        ConstrainedLayer.__init__(self, nn.Conv1d(nChannelsPrevious,
            nChannels, kernelSize, padding=padding, bias=bias, stride=
            stride), **kwargs)


class ShiftedConv(nn.Module):

    def __init__(self, dimOutputAR, dimOutputEncoder, kernelSize):
        super(ShiftedConv, self).__init__()
        self.module = EqualizedConv1d(dimOutputAR, dimOutputEncoder,
            kernelSize, equalized=True, padding=0)
        self.kernelSize = kernelSize

    def forward(self, x):
        N, _S, C = x.size()
        x = x.permute(0, 2, 1)
        padding = torch.zeros(N, C, self.kernelSize - 1, device=x.device)
        x = torch.cat([padding, x], dim=2)
        x = self.module(x)
        x = x.permute(0, 2, 1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dimOutputAR': 4, 'dimOutputEncoder': 4, 'kernelSize': 4}]
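The kernelSize - 1 zeros are prepended on the left only, which makes the convolution causal (output step t never sees inputs after t) while preserving the sequence length. A shape sketch:

import torch

conv = ShiftedConv(dimOutputAR=4, dimOutputEncoder=4, kernelSize=4)
x = torch.rand(4, 4, 4)  # (batch, seq, channels)
y = conv(x)
assert y.shape == x.shape  # length preserved: (4 + 3) - 4 + 1 == 4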
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn from numpy import prod assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x1 = xindex // 7 % 4 x2 = xindex // 28 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 0.0 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp0 >= tmp3 tl.full([1], 7, tl.int64) tmp11 = tl.load(in_ptr0 + (x1 + 4 * (-3 + x0) + 16 * x2), tmp8 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp4, tmp7, tmp11) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.3535533905932738 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(112)](primals_1, buf0, 112, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_mul_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf0 def getLayerNormalizationFactor(x): """ Get He's constant for the given layer https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf """ size = x.weight.size() fan_in = prod(size[1:]) return math.sqrt(2.0 / fan_in) class ConstrainedLayer(nn.Module): """ A handy refactor that allows the user to: - initialize one layer's bias to zero - apply He's initialization at runtime """ def __init__(self, module, equalized=True, lrMul=1.0, initBiasToZero=True): """ equalized (bool): if true, the layer's weight should evolve within the range (-1, 1) initBiasToZero (bool): if true, bias will be initialized to zero """ super(ConstrainedLayer, self).__init__() self.module = module self.equalized = equalized if initBiasToZero and module.bias is not None: self.module.bias.data.fill_(0) if self.equalized: self.module.weight.data.normal_(0, 1) self.weight = getLayerNormalizationFactor(self.module) * lrMul def forward(self, x): x = self.module(x) if self.equalized: x *= self.weight return x 
class EqualizedConv1d(ConstrainedLayer):

    def __init__(self, nChannelsPrevious, nChannels, kernelSize, padding=0,
        bias=True, stride=1, **kwargs):
        """
        An nn.Conv1d module with specific constraints
        Args:
            nChannelsPrevious (int): number of channels in the previous layer
            nChannels (int): number of channels of the current layer
            kernelSize (int): size of the convolutional kernel
            padding (int): convolution's padding
            bias (bool): with bias ?
        """
        ConstrainedLayer.__init__(self, nn.Conv1d(nChannelsPrevious,
            nChannels, kernelSize, padding=padding, bias=bias, stride=
            stride), **kwargs)


class ShiftedConvNew(nn.Module):

    def __init__(self, dimOutputAR, dimOutputEncoder, kernelSize):
        super(ShiftedConvNew, self).__init__()
        self.module = EqualizedConv1d(dimOutputAR, dimOutputEncoder,
            kernelSize, equalized=True, padding=0)
        self.kernelSize = kernelSize

    def forward(self, input_0):
        # In the compiled graph above, primals_1 feeds the padding/cat kernel
        # (the data path), primals_2 is the convolution weight, and primals_3
        # is the bias, so the arguments must be mapped in that order.
        primals_1 = input_0
        primals_2 = self.module.module.weight
        primals_3 = self.module.module.bias
        output = call([primals_1, primals_2, primals_3])
        return output[0]
leo19941227/CPC_audio
ShiftedConv
false
12,704
[ "MIT" ]
0
2d0051915f4b4a5f773e4510cd5535e1fcb433d8
https://github.com/leo19941227/CPC_audio/tree/2d0051915f4b4a5f773e4510cd5535e1fcb433d8
_Hsigmoid
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/gl/cgljna3wfarubemgd6d2p3bgazvfhdxtrcu7luu5yza3rrfkty2s.py # Topologically Sorted Source Nodes: [add, hardtanh, truediv], Original ATen: [aten.add, aten.hardtanh, aten.div] # Source node to ATen node mapping: # add => add # hardtanh => clamp_max, clamp_min # truediv => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3.0), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {}) triton_poi_fused_add_div_hardtanh_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, hardtanh, truediv], Original ATen: [aten.add, aten.hardtanh, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class _Hsigmoid(nn.Module): def __init__(self, inplace=True): super(_Hsigmoid, self).__init__() self.relu6 = nn.ReLU6(inplace) def forward(self, x): return self.relu6(x + 3.0) / 6.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
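The fused kernel above folds the division by 6 into a multiply by its reciprocal; numerically this matches the eager definition. A minimal CPU sketch:

import torch

x = torch.randn(4, 4, 4, 4)
eager = torch.nn.ReLU6()(x + 3.0) / 6.0
fused = torch.clamp(x + 3.0, 0.0, 6.0) * 0.16666666666666666
assert torch.allclose(eager, fused)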
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class _HsigmoidNew(nn.Module): def __init__(self, inplace=True): super(_HsigmoidNew, self).__init__() self.relu6 = nn.ReLU6(inplace) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hzwangjl/Lightweight-Segmentation
_Hsigmoid
false
12,705
[ "Apache-2.0" ]
0
3a476719bdfee653ac1e1617c22714b7ee932cef
https://github.com/hzwangjl/Lightweight-Segmentation/tree/3a476719bdfee653ac1e1617c22714b7ee932cef
TransposedConvModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/2o/c2osclrsohpe3tpndjv4ekqsr45ydz7qlxpbgdi5flkzlbs6sptt.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1440 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 36) % 10 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mg/cmgsufzfcy5kkzrqdwhutk2rm5vnoeldyqqow7ed43pwacyq4p76.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2560 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 10 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (10, 10, 3, 3), (90, 9, 3, 1)) assert_size_stride(primals_2, (10, ), (1, )) assert_size_stride(primals_3, (4, 10, 4, 4), (160, 16, 4, 1)) assert_size_stride(primals_4, (10, 10, 3, 3), (90, 9, 3, 1)) assert_size_stride(primals_5, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 6, 6), (360, 36, 6, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted 
Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1440, grid=grid(1440), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 10, 8, 8), (640, 64, 8, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 2560, grid=grid(2560), stream=stream0) del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((10, 10, 3, 3), (90, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 10, 4, 4), (160, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((10, 10, 3, 3), (90, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 class TransposedConvModel(torch.nn.Module): def __init__(self): super(TransposedConvModel, self).__init__() self.conv1 = torch.nn.ConvTranspose2d(10, 10, 3) self.relu1 = torch.nn.ReLU() self.conv2 = torch.nn.ConvTranspose2d(10, 10, 3) def forward(self, x): x = self.conv1(x) x = self.relu1(x) x = self.conv2(x) return x def get_inputs(): return [torch.rand([4, 10, 4, 4])] def get_init_inputs(): return [[], {}]
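A minimal usage sketch (my addition, not part of the source repo; it assumes a CUDA device, since the compiled kernels above only target CUDA). With kernel_size=3, stride=1, and no padding, each ConvTranspose2d grows the spatial size by kernel_size - 1, so the 4x4 input becomes 6x6 after conv1 and 8x8 after conv2, matching the assert_size_stride checks on buf0 and buf2 in call():

import torch

model = TransposedConvModel().cuda()
x = torch.rand(4, 10, 4, 4, device='cuda')  # same shape as get_inputs()
out = model(x)
assert out.shape == (4, 10, 8, 8)  # spatial size grows 4 -> 6 -> 8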
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1440 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 36 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 2560 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (10, 10, 3, 3), (90, 9, 3, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 10, 4, 4), (160, 16, 4, 1)) assert_size_stride(primals_4, (10, 10, 3, 3), (90, 9, 3, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 6, 6), (360, 36, 6, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1440)](buf1, primals_2, 1440, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 10, 8, 8), (640, 64, 8, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(2560)](buf3, primals_5, 2560, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class TransposedConvModelNew(torch.nn.Module): def __init__(self): super(TransposedConvModelNew, self).__init__() self.conv1 = torch.nn.ConvTranspose2d(10, 10, 3) self.relu1 = torch.nn.ReLU() self.conv2 = torch.nn.ConvTranspose2d(10, 10, 3) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
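For readers checking the bias broadcast in triton_poi_fused_convolution_relu_0: the (4, 10, 6, 6) output is contiguous with strides (360, 36, 6, 1), so x1 = xindex // 36 % 10 recovers the channel index of each flat element, which is then used to load the per-channel bias. A quick eager-mode verification of that decomposition (my sketch, not generated code):

import torch

# flat offset -> (n, c, h, w) for a contiguous tensor of shape (4, 10, 6, 6)
t = torch.arange(4 * 10 * 6 * 6).reshape(4, 10, 6, 6)
flat = 777  # any flat offset
n, c = flat // 360, flat // 36 % 10  # c is the kernel's x1
h, w = flat // 6 % 6, flat % 6
assert t[n, c, h, w].item() == flat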
arjunsuresh/aimet
TransposedConvModel
false
12,706
[ "BSD-3-Clause" ]
0
f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
https://github.com/arjunsuresh/aimet/tree/f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
ln_mod
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/mo/cmorwi4rssg3yydylx6zgmrgf5eup5jag7rkdb4oyttging7iaei.py # Topologically Sorted Source Nodes: [std, pow_1, add, sqrt, truediv, mul], Original ATen: [aten.std, aten.pow, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # pow_1 => pow_1 # sqrt => sqrt_1 # std => sqrt, var # truediv => div # Graph fragment: # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg0_1, [-1]), kwargs = {correction: 0.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sqrt, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, 1e-05), kwargs = {}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %arg1_1), kwargs = {}) triton_poi_fused_add_div_mul_pow_sqrt_std_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_std_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_std_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_std_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp1 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tmp2 - tmp9 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = tmp4 - tmp9 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp6 - tmp9 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp20 / tmp8 tmp22 = libdevice.sqrt(tmp21) tmp23 = tmp22 * tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp0 / tmp26 tmp29 = tmp27 * tmp28 tl.store(out_ptr0 + (x2), tmp29, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [std, pow_1, add, sqrt, truediv, mul], Original ATen: [aten.std, aten.pow, aten.add, aten.sqrt, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_std_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.nn.parameter import Parameter class ln_mod(nn.Module): def __init__(self, nx, eps=1e-05): super().__init__() self.eps = eps self.weight = Parameter(torch.Tensor(nx)) def forward(self, x): return x / torch.sqrt(torch.std(x, axis=-1, unbiased=False, keepdim=True) ** 2 + self.eps) * self.weight.data[..., :] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nx': 4}]
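The fused kernel above implements this forward pass directly: it unrolls the size-4 reduction over the last dim to get the biased variance, takes sqrt, squares it back, adds eps, takes sqrt again, and divides before scaling by the weight. Since sqrt(std ** 2) is just std, the whole expression is x / sqrt(var(x, dim=-1, unbiased=False) + eps) * weight. A quick eager-mode check (my sketch; torch.Tensor(nx) leaves the weight uninitialized, so it is set to ones here):

import torch

m = ln_mod(4)
torch.nn.init.ones_(m.weight)
x = torch.rand(4, 4, 4, 4)
ref = x / torch.sqrt(x.var(dim=-1, unbiased=False, keepdim=True) + 1e-05)
assert torch.allclose(m(x), ref, atol=1e-06)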
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_std_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp1 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tmp2 - tmp9 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = tmp4 - tmp9 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp6 - tmp9 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp20 / tmp8 tmp22 = libdevice.sqrt(tmp21) tmp23 = tmp22 * tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp0 / tmp26 tmp29 = tmp27 * tmp28 tl.store(out_ptr0 + x2, tmp29, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_std_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class ln_modNew(nn.Module): def __init__(self, nx, eps=1e-05): super().__init__() self.eps = eps self.weight = Parameter(torch.Tensor(nx)) def forward(self, input_0): arg1_1 = self.weight arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
lienghongky/image-gpt2
ln_mod
false
12,707
[ "MIT" ]
0
ef9f3c61d4a09cbb75114dd067d0014948e82d7b
https://github.com/lienghongky/image-gpt2/tree/ef9f3c61d4a09cbb75114dd067d0014948e82d7b
TemperatureHolder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/xp/cxpd2bcgjjguvsbnowhprcscim22tcfdskii4vimpwk2r73oeben.py # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] # Source node to ATen node mapping: # exp => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_exp_0 = async_compile.triton('triton_poi_fused_exp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl_math.exp(tmp1) tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): 
primals_1, = args args.clear() assert_size_stride(primals_1, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_exp_0.run(primals_1, buf0, 1, grid=grid(1), stream=stream0) del primals_1 return (buf0, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class TemperatureHolder(nn.Module): """Module that holds a temperature as a learnable value. Args: initial_log_temperature (float): Initial value of log(temperature). """ def __init__(self, initial_log_temperature=0): super().__init__() self.log_temperature = nn.Parameter(torch.tensor( initial_log_temperature, dtype=torch.float32)) def forward(self): """Return a temperature as a torch.Tensor.""" return torch.exp(self.log_temperature) def get_inputs(): return [] def get_init_inputs(): return [[], {}]
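A short eager-mode sketch of how this is typically used (my addition; the loss below is purely illustrative): storing log(temperature) keeps the temperature strictly positive while remaining freely learnable, and gradients flow through the exp:

import torch

holder = TemperatureHolder(initial_log_temperature=0)
temp = holder()  # exp(0) == 1.0
loss = (temp - 0.5) ** 2  # any scalar objective involving the temperature
loss.backward()
assert holder.log_temperature.grad is not None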
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl_math.exp(tmp1) tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp2, None) def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_poi_fused_exp_0[grid(1)](primals_1, buf0, 1, XBLOCK=1, num_warps=1, num_stages=1) del primals_1 return buf0, buf0 class TemperatureHolderNew(nn.Module): """Module that holds a temperature as a learnable value. Args: initial_log_temperature (float): Initial value of log(temperature). """ def __init__(self, initial_log_temperature=0): super().__init__() self.log_temperature = nn.Parameter(torch.tensor(initial_log_temperature, dtype=torch.float32)) def forward(self): primals_1 = self.log_temperature output = call([primals_1]) return output[0]
lin826/pfrl
TemperatureHolder
false
12,708
[ "MIT" ]
0
62d7f13b854f1879211a386fd870a7db982cc8ec
https://github.com/lin826/pfrl/tree/62d7f13b854f1879211a386fd870a7db982cc8ec
InnerProductNetwork
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/4j/c4jap2mmmjx3sc7h5t72pmbjcbecx7tmbl4pp62ydyuz7rfnbxw5.py # Topologically Sorted Source Nodes: [getitem, getitem_1, mul, sum_1], Original ATen: [aten.index, aten.mul, aten.sum] # Source node to ATen node mapping: # getitem => index # getitem_1 => index_1 # mul => mul # sum_1 => sum_1 # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy_1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %index_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) triton_poi_fused_index_mul_sum_0 = async_compile.triton('triton_poi_fused_index_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 6 x0 = xindex % 4 x2 = (xindex // 24) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + (x0 + (16*tmp16) + (64*x2)), xmask) tmp18 = tl.where(tmp6, tmp5, tmp1) tmp19 = tl.where(tmp4, tmp3, tmp18) tmp20 = tl.where(tmp13, tmp1, tmp1) tmp21 = tl.where(tmp11, tmp5, tmp20) tmp22 = tl.where(tmp2, tmp19, tmp21) tmp23 = tl.load(in_ptr0 + (x0 + (16*tmp22) + (64*x2)), xmask) tmp24 = tmp17 * tmp23 tmp25 = tl.load(in_ptr0 + (4 + x0 + (16*tmp16) + (64*x2)), xmask) tmp26 = tl.load(in_ptr0 + (4 + x0 + (16*tmp22) + (64*x2)), xmask) tmp27 = tmp25 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tl.load(in_ptr0 + (8 + x0 + (16*tmp16) + (64*x2)), xmask) tmp30 = tl.load(in_ptr0 + (8 + x0 + (16*tmp22) + (64*x2)), xmask) tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tl.load(in_ptr0 + (12 + x0 + (16*tmp16) + (64*x2)), xmask) tmp34 = tl.load(in_ptr0 + (12 + x0 + (16*tmp22) + (64*x2)), xmask) tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tl.store(out_ptr0 + (x3), tmp36, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [getitem, getitem_1, mul, sum_1], Original ATen: [aten.index, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_index_mul_sum_0.run(arg0_1, buf0, 96, grid=grid(96), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data class InnerProductNetwork(torch.nn.Module): def forward(self, x): """ :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)`` """ num_fields = x.shape[1] row, col = list(), list() for i in range(num_fields - 1): for j in range(i + 1, num_fields): row.append(i), col.append(j) return torch.sum(x[:, row] * x[:, col], dim=2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
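The nested loop enumerates every unordered field pair, so for num_fields = 4 it builds row = [0, 0, 0, 1, 1, 2] and col = [1, 2, 3, 2, 3, 3], i.e. 4 * 3 / 2 = 6 pairs; that is why the compiled kernel hardcodes those indices in tl.where chains and writes a 6-wide middle dimension. A small sketch with the 3-D input shape documented in the docstring (my addition):

import torch

net = InnerProductNetwork()
x = torch.rand(4, 4, 4)  # (batch_size, num_fields, embed_dim)
out = net(x)
assert out.shape == (4, 6)  # one inner product per field pair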
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x0 = xindex % 4 x2 = xindex // 24 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + (x0 + 16 * tmp16 + 64 * x2), xmask) tmp18 = tl.where(tmp6, tmp5, tmp1) tmp19 = tl.where(tmp4, tmp3, tmp18) tmp20 = tl.where(tmp13, tmp1, tmp1) tmp21 = tl.where(tmp11, tmp5, tmp20) tmp22 = tl.where(tmp2, tmp19, tmp21) tmp23 = tl.load(in_ptr0 + (x0 + 16 * tmp22 + 64 * x2), xmask) tmp24 = tmp17 * tmp23 tmp25 = tl.load(in_ptr0 + (4 + x0 + 16 * tmp16 + 64 * x2), xmask) tmp26 = tl.load(in_ptr0 + (4 + x0 + 16 * tmp22 + 64 * x2), xmask) tmp27 = tmp25 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tl.load(in_ptr0 + (8 + x0 + 16 * tmp16 + 64 * x2), xmask) tmp30 = tl.load(in_ptr0 + (8 + x0 + 16 * tmp22 + 64 * x2), xmask) tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tl.load(in_ptr0 + (12 + x0 + 16 * tmp16 + 64 * x2), xmask) tmp34 = tl.load(in_ptr0 + (12 + x0 + 16 * tmp22 + 64 * x2), xmask) tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tl.store(out_ptr0 + x3, tmp36, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_index_mul_sum_0[grid(96)](arg0_1, buf0, 96, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class InnerProductNetworkNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
lipmedusea/pytorch
InnerProductNetwork
false
12,709
[ "MIT" ]
0
5d94694b9e1193a93dd7f75ea2042b5a1cf178bc
https://github.com/lipmedusea/pytorch/tree/5d94694b9e1193a93dd7f75ea2042b5a1cf178bc
FCLateActionSAQFunction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/lg/clgg7lidrsfgaud7ua7lulq4csdjjfhoa5iu6ldcldqvnqeoprch.py # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # h_1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_4], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 8, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/p5/cp5tfqfwrfwxlkkeq5u27dwcgidea534dshg3prbpaz2f7xapt3e.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # h => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 
), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 8), (8, 1)) assert_size_stride(primals_6, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_3, primals_4, buf1, 32, grid=grid(32), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf0, primals_3, buf4, 16, grid=grid(16), stream=stream0) del buf0 del primals_3 return (buf3, primals_1, buf1, primals_5, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from torch import nn from abc import ABCMeta from abc import abstractmethod import torch.nn.functional as F def init_lecun_normal(tensor, scale=1.0): """Initializes the tensor with LeCunNormal.""" fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in') std = scale * np.sqrt(1.0 / fan_in) with torch.no_grad(): return tensor.normal_(0, std) @torch.no_grad() def init_chainer_default(layer): """Initializes the layer with the chainer default. weights with LeCunNormal(scale=1.0) and zeros as biases """ assert isinstance(layer, nn.Module) if isinstance(layer, (nn.Linear, nn.Conv2d)): init_lecun_normal(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) return layer class MLP(nn.Module): """Multi-Layer Perceptron""" def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu, last_wscale=1): self.in_size = in_size self.out_size = out_size self.hidden_sizes = hidden_sizes self.nonlinearity = nonlinearity super().__init__() if hidden_sizes: self.hidden_layers = nn.ModuleList() self.hidden_layers.append(nn.Linear(in_size, hidden_sizes[0])) for hin, hout in zip(hidden_sizes, hidden_sizes[1:]): self.hidden_layers.append(nn.Linear(hin, hout)) self.hidden_layers.apply(init_chainer_default) self.output = nn.Linear(hidden_sizes[-1], out_size) else: self.output = nn.Linear(in_size, out_size) init_lecun_normal(self.output.weight, scale=last_wscale) nn.init.zeros_(self.output.bias) def forward(self, x): h = x if self.hidden_sizes: for l in self.hidden_layers: h = self.nonlinearity(l(h)) return self.output(h) class StateActionQFunction(object, metaclass=ABCMeta): """Abstract Q-function with state and action input.""" @abstractmethod def __call__(self, x, a): """Evaluates Q-function Args: x (ndarray): state input a (ndarray): action input Returns: Q-value for state x and action a """ raise NotImplementedError() class FCLateActionSAQFunction(nn.Module, StateActionQFunction): """Fully-connected (s,a)-input Q-function with late action input. Actions are not included until the second hidden layer and not normalized. This architecture is used in the DDPG paper: http://arxiv.org/abs/1509.02971 Args: n_dim_obs (int): Number of dimensions of observation space. n_dim_action (int): Number of dimensions of action space. n_hidden_channels (int): Number of hidden channels. n_hidden_layers (int): Number of hidden layers. It must be greater than or equal to 1. nonlinearity (callable): Nonlinearity between layers. It must accept a Variable as an argument and return a Variable with the same shape. Nonlinearities with learnable parameters such as PReLU are not supported. last_wscale (float): Scale of weight initialization of the last layer. 
""" def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels, n_hidden_layers, nonlinearity=F.relu, last_wscale=1.0): assert n_hidden_layers >= 1 self.n_input_channels = n_dim_obs + n_dim_action self.n_hidden_layers = n_hidden_layers self.n_hidden_channels = n_hidden_channels self.nonlinearity = nonlinearity super().__init__() self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels, hidden_sizes=[]) self.mlp = MLP(in_size=n_hidden_channels + n_dim_action, out_size=1, hidden_sizes=[self.n_hidden_channels] * (self.n_hidden_layers - 1), nonlinearity=nonlinearity, last_wscale=last_wscale) self.output = self.mlp.output def forward(self, state, action): h = self.nonlinearity(self.obs_mlp(state)) h = torch.cat((h, action), dim=1) return self.mlp(h) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_dim_obs': 4, 'n_dim_action': 4, 'n_hidden_channels': 4, 'n_hidden_layers': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np from torch import nn from abc import ABCMeta from abc import abstractmethod import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 8), (8, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](buf0, primals_3, primals_4, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf0, primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del primals_3 return buf3, primals_1, buf1, primals_5, buf4 def init_lecun_normal(tensor, scale=1.0): """Initializes the tensor with LeCunNormal.""" fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in') std = scale * np.sqrt(1.0 / fan_in) with torch.no_grad(): return tensor.normal_(0, std) @torch.no_grad() def 
init_chainer_default(layer): """Initializes the layer with the chainer default. weights with LeCunNormal(scale=1.0) and zeros as biases """ assert isinstance(layer, nn.Module) if isinstance(layer, (nn.Linear, nn.Conv2d)): init_lecun_normal(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) return layer class MLP(nn.Module): """Multi-Layer Perceptron""" def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu, last_wscale=1): self.in_size = in_size self.out_size = out_size self.hidden_sizes = hidden_sizes self.nonlinearity = nonlinearity super().__init__() if hidden_sizes: self.hidden_layers = nn.ModuleList() self.hidden_layers.append(nn.Linear(in_size, hidden_sizes[0])) for hin, hout in zip(hidden_sizes, hidden_sizes[1:]): self.hidden_layers.append(nn.Linear(hin, hout)) self.hidden_layers.apply(init_chainer_default) self.output = nn.Linear(hidden_sizes[-1], out_size) else: self.output = nn.Linear(in_size, out_size) init_lecun_normal(self.output.weight, scale=last_wscale) nn.init.zeros_(self.output.bias) def forward(self, x): h = x if self.hidden_sizes: for l in self.hidden_layers: h = self.nonlinearity(l(h)) return self.output(h) class StateActionQFunction(object, metaclass=ABCMeta): """Abstract Q-function with state and action input.""" @abstractmethod def __call__(self, x, a): """Evaluates Q-function Args: x (ndarray): state input a (ndarray): action input Returns: Q-value for state x and action a """ raise NotImplementedError() class FCLateActionSAQFunctionNew(nn.Module, StateActionQFunction): """Fully-connected (s,a)-input Q-function with late action input. Actions are not included until the second hidden layer and not normalized. This architecture is used in the DDPG paper: http://arxiv.org/abs/1509.02971 Args: n_dim_obs (int): Number of dimensions of observation space. n_dim_action (int): Number of dimensions of action space. n_hidden_channels (int): Number of hidden channels. n_hidden_layers (int): Number of hidden layers. It must be greater than or equal to 1. nonlinearity (callable): Nonlinearity between layers. It must accept a Variable as an argument and return a Variable with the same shape. Nonlinearities with learnable parameters such as PReLU are not supported. last_wscale (float): Scale of weight initialization of the last layer. """ def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels, n_hidden_layers, nonlinearity=F.relu, last_wscale=1.0): assert n_hidden_layers >= 1 self.n_input_channels = n_dim_obs + n_dim_action self.n_hidden_layers = n_hidden_layers self.n_hidden_channels = n_hidden_channels self.nonlinearity = nonlinearity super().__init__() self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels, hidden_sizes=[]) self.mlp = MLP(in_size=n_hidden_channels + n_dim_action, out_size=1, hidden_sizes=[self.n_hidden_channels] * (self.n_hidden_layers - 1), nonlinearity=nonlinearity, last_wscale=last_wscale) self.output = self.mlp.output def forward(self, input_0, input_1): primals_1 = self.obs_mlp.output.weight primals_3 = self.obs_mlp.output.bias primals_5 = self.mlp.output.weight primals_6 = self.mlp.output.bias primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
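Two notes on the row above (my additions). First, a minimal eager-mode usage sketch matching get_init_inputs() and get_inputs(): the observation passes through a single Linear + ReLU, the action is concatenated afterwards, and with n_hidden_layers=1 the inner MLP reduces to one Linear(8, 1):

import torch

q_func = FCLateActionSAQFunction(n_dim_obs=4, n_dim_action=4, n_hidden_channels=4, n_hidden_layers=1)
obs, action = torch.rand(4, 4), torch.rand(4, 4)
q = q_func(obs, action)
assert q.shape == (4, 1)  # one Q-value per (state, action) pair

Second, note how triton_poi_fused_cat_0 folds the first layer's bias add and ReLU into the concatenation itself: buf0 holds only the raw matmul, and the kernel writes relu(buf0 + bias) into columns 0..3 of buf1 while copying the action into columns 4..7. An eager-mode equivalent of that single kernel (a readability sketch with stand-in tensors):

W1, b1 = torch.rand(4, 4), torch.rand(4)  # stand-ins for obs_mlp.output.weight / .bias
h = torch.relu(obs @ W1.t() + b1)  # bias add + ReLU fused into the cat write
buf1 = torch.cat([h, action], dim=1)  # (4, 8), consumed by the final addmm
assert buf1.shape == (4, 8)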
lin826/pfrl
FCLateActionSAQFunction
false
12,710
[ "MIT" ]
0
62d7f13b854f1879211a386fd870a7db982cc8ec
https://github.com/lin826/pfrl/tree/62d7f13b854f1879211a386fd870a7db982cc8ec
TimeEncode
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/lo/clokukz4nbskqkm6bzg3ygkvygtaf7ckpqlwtxkl4gnncd7hhrnk.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cos] # Source node to ATen node mapping: # output => cos # Graph fragment: # %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%view_1,), kwargs = {}) triton_poi_fused_cos_0 = async_compile.triton('triton_poi_fused_cos_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cos_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cos_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl_math.cos(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() 
assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 1), (1, 1), 0), reinterpret_tensor(primals_2, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cos] stream0 = get_raw_stream(0) triton_poi_fused_cos_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) return (buf1, reinterpret_tensor(primals_1, (16, 1), (1, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np


class TimeEncode(torch.nn.Module):

    def __init__(self, dimension):
        super(TimeEncode, self).__init__()
        self.dimension = dimension
        self.w = torch.nn.Linear(1, dimension)
        self.w.weight = torch.nn.Parameter(torch.from_numpy(1 / 10 ** np.linspace(0, 9, dimension)).float().reshape(dimension, -1))
        self.w.bias = torch.nn.Parameter(torch.zeros(dimension).float())

    def forward(self, t):
        t = t.unsqueeze(dim=2)
        output = torch.cos(self.w(t))
        return output


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'dimension': 4}]
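A minimal usage sketch for the module above (a hypothetical addition, assuming TimeEncode is in scope; shapes follow get_inputs/get_init_inputs):

import torch

# Hypothetical smoke test: a (batch, seq) tensor of timestamps is lifted to
# (batch, seq, 1), projected by Linear(1, dimension), and passed through cos.
enc = TimeEncode(dimension=4)
t = torch.rand(4, 4)
out = enc(t)
assert out.shape == (4, 4, 4)          # (batch, seq, dimension)
assert out.abs().max().item() <= 1.0   # cosine features are bounded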
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cos_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.cos(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 1), (1, 1), 0), reinterpret_tensor(primals_2, (1, 4), (1, 1), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cos_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf1, reinterpret_tensor(primals_1, (16, 1), (1, 1), 0), buf0 class TimeEncodeNew(torch.nn.Module): def __init__(self, dimension): super(TimeEncodeNew, self).__init__() self.dimension = dimension self.w = torch.nn.Linear(1, dimension) self.w.weight = torch.nn.Parameter(torch.from_numpy(1 / 10 ** np. linspace(0, 9, dimension)).float().reshape(dimension, -1)) self.w.bias = torch.nn.Parameter(torch.zeros(dimension).float()) def forward(self, input_0): primals_2 = self.w.weight primals_3 = self.w.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
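The compiled call() above lowers Linear(1, dim) on a (4, 4) input to a single addmm over a (16, 1) view, then applies the fused cos kernel. A hedged restatement in plain PyTorch (fresh tensors, not the record's wrapper):

import torch

# Hypothetical equivalent of the addmm + cos lowering performed by call().
t = torch.rand(4, 4)
w, b = torch.rand(4, 1), torch.rand(4)
ref = torch.cos(torch.nn.functional.linear(t.unsqueeze(2), w, b))
out = torch.cos(torch.addmm(b, t.reshape(16, 1), w.t()).view(4, 4, 4))
assert torch.allclose(ref, out)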
linhthi/tgn
TimeEncode
false
12,711
[ "Apache-2.0" ]
0
bb83f82d89aba07d07da3b173803fb0df32ebbbc
https://github.com/linhthi/tgn/tree/bb83f82d89aba07d07da3b173803fb0df32ebbbc
MergeLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu] # Source node to ATen node mapping: # h => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), 
(4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 return (buf3, buf0, buf2, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class MergeLayer(torch.nn.Module):

    def __init__(self, dim1, dim2, dim3, dim4):
        super().__init__()
        self.fc1 = torch.nn.Linear(dim1 + dim2, dim3)
        self.fc2 = torch.nn.Linear(dim3, dim4)
        self.act = torch.nn.ReLU()
        torch.nn.init.xavier_normal_(self.fc1.weight)
        torch.nn.init.xavier_normal_(self.fc2.weight)

    def forward(self, x1, x2):
        x = torch.cat([x1, x2], dim=1)
        h = self.act(self.fc1(x))
        return self.fc2(h)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'dim1': 4, 'dim2': 4, 'dim3': 4, 'dim4': 4}]
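A hypothetical smoke test for the layer above, assuming MergeLayer is in scope:

import torch

# Inputs are concatenated along dim=1, so fc1 must take dim1 + dim2 features.
layer = MergeLayer(dim1=4, dim2=4, dim3=4, dim4=4)
out = layer(torch.rand(4, 4), torch.rand(4, 4))
assert out.shape == (4, 4)  # cat -> (4, 8), fc1/ReLU -> (4, 4), fc2 -> (4, 4)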
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 return buf3, buf0, buf2, primals_5 class MergeLayerNew(torch.nn.Module): def __init__(self, dim1, dim2, dim3, dim4): super().__init__() self.fc1 = torch.nn.Linear(dim1 + dim2, dim3) self.fc2 = torch.nn.Linear(dim3, dim4) self.act = torch.nn.ReLU() torch.nn.init.xavier_normal_(self.fc1.weight) torch.nn.init.xavier_normal_(self.fc2.weight) def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_1 = self.fc2.weight primals_6 = self.fc2.bias primals_2 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
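triton_poi_fused_cat_0 above materializes torch.cat along dim=1 with two masked loads: column indices below 4 read from the first input, the rest from the second. A hedged restatement of that indexing in plain PyTorch:

import torch

# Hypothetical index-level restatement of the fused cat kernel above.
a, b = torch.rand(4, 4), torch.rand(4, 4)
idx = torch.arange(8)
manual = torch.where(idx[None, :] < 4,
                     a[:, idx.clamp(max=3)],        # masked load from in_ptr0
                     b[:, (idx - 4).clamp(min=0)])  # masked load from in_ptr1
assert torch.equal(manual, torch.cat([a, b], dim=1))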
linhthi/tgn
MergeLayer
false
12,712
[ "Apache-2.0" ]
0
bb83f82d89aba07d07da3b173803fb0df32ebbbc
https://github.com/linhthi/tgn/tree/bb83f82d89aba07d07da3b173803fb0df32ebbbc
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/y3/cy3bfx7suhzmqovrmgwr7kipqmduyq6otbqkuvdqlaprgg7g2dgl.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 5120 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex 
% 80 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rn/crnqz7p4p2tzqanql5i55teqzlggwe7o5pppywh36vjdillkofmn.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_2 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (80, 4), (4, 1)) assert_size_stride(primals_2, (80, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (10, 80), (80, 1)) assert_size_stride(primals_5, (10, ), (1, )) assert_size_stride(primals_6, (1, 10), (10, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((64, 80), (80, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 80), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 80), (1280, 320, 80, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 80), (1280, 320, 80, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 5120, grid=grid(5120), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 80), (80, 1), 0), reinterpret_tensor(primals_4, (80, 10), (1, 80), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 10), (160, 40, 10, 1), 0); del buf2 # reuse buf6 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 640, grid=grid(640), stream=stream0) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 10), (10, 1), 0), reinterpret_tensor(primals_6, (10, 1), (1, 10), 0), alpha=1, beta=1, out=buf5) del primals_7 return (reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 80), (80, 1), 0), reinterpret_tensor(buf3, (64, 10), (10, 1), 0), primals_6, buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((80, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((80, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((10, 80), (80, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 10), (10, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class MLP(torch.nn.Module):

    def __init__(self, dim, drop=0.3):
        super().__init__()
        self.fc_1 = torch.nn.Linear(dim, 80)
        self.fc_2 = torch.nn.Linear(80, 10)
        self.fc_3 = torch.nn.Linear(10, 1)
        self.act = torch.nn.ReLU()
        self.dropout = torch.nn.Dropout(p=drop, inplace=False)

    def forward(self, x):
        x = self.act(self.fc_1(x))
        x = self.dropout(x)
        x = self.act(self.fc_2(x))
        x = self.dropout(x)
        return self.fc_3(x).squeeze(dim=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
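A hypothetical usage sketch for the MLP above. nn.Linear acts on the last dim, so any leading shape works; with the 4x4x4x4 sample input, .squeeze(dim=1) is a no-op because dim 1 has size 4, not 1:

import torch

# Hypothetical smoke test, assuming MLP is in scope.
mlp = MLP(dim=4)
mlp.eval()  # disable dropout for a deterministic forward pass
out = mlp(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 1)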
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 5120 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 80 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (80, 4), (4, 1)) assert_size_stride(primals_2, (80,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (10, 80), (80, 1)) assert_size_stride(primals_5, (10,), (1,)) assert_size_stride(primals_6, (1, 10), (10, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 80), (80, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 80), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 80), (1280, 320, 80, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 80), (1280, 320, 80, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(5120)](buf1, primals_2, buf7, 5120, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 80), (80, 1), 0), reinterpret_tensor(primals_4, (80, 10), (1, 80), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(640)](buf3, primals_5, buf6, 640, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 10), (10, 1), 0), reinterpret_tensor(primals_6, (10, 1), (1, 10), 0), alpha=1, beta=1, out=buf5) del primals_7 return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 80), (80, 1), 0), reinterpret_tensor( buf3, (64, 
10), (10, 1), 0), primals_6, buf6, primals_4, buf7 class MLPNew(torch.nn.Module): def __init__(self, dim, drop=0.3): super().__init__() self.fc_1 = torch.nn.Linear(dim, 80) self.fc_2 = torch.nn.Linear(80, 10) self.fc_3 = torch.nn.Linear(10, 1) self.act = torch.nn.ReLU() self.dropout = torch.nn.Dropout(p=drop, inplace=False) def forward(self, input_0): primals_1 = self.fc_1.weight primals_2 = self.fc_1.bias primals_4 = self.fc_2.weight primals_5 = self.fc_2.bias primals_6 = self.fc_3.weight primals_7 = self.fc_3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
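The relu/threshold_backward fusions above store both the activation and the boolean mask consumed by autograd. A hedged restatement of what those kernels compute:

import torch

# Hypothetical restatement of the relu + threshold_backward fusion: the kernel
# writes relu(x + bias) in place plus a (result <= 0) mask that the backward
# pass uses to zero incoming gradients.
x, bias = torch.randn(64, 80), torch.randn(80)
y = torch.relu(x + bias)
mask = y <= 0
grad_out = torch.ones_like(y)
assert torch.equal(grad_out.masked_fill(mask, 0.0), (y > 0).float())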
linhthi/tgn
MLP
false
12,713
[ "Apache-2.0" ]
0
bb83f82d89aba07d07da3b173803fb0df32ebbbc
https://github.com/linhthi/tgn/tree/bb83f82d89aba07d07da3b173803fb0df32ebbbc
ModuleForDdpCommHook
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3j/c3jyortvllaykpler373rz7oktpwexjmntais526ul7ro2uxfxcg.py # Topologically Sorted Source Nodes: [add, add_1], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # add_1 => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x2), xmask) tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_3, (2, 2), (2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [add, add_1], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(primals_3, primals_1, primals_2, buf0, 64, grid=grid(64), stream=stream0) del primals_1 del primals_2 del primals_3 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.cuda
import torch.cuda.nccl
import torch.backends.cudnn
import torch.backends.mkl


class Task(nn.Module):

    def __init__(self):
        super().__init__()
        self.p = nn.Parameter(torch.ones(2, 2))

    def forward(self, x):
        return self.p + x


class ModuleForDdpCommHook(nn.Module):

    def __init__(self):
        super().__init__()
        self.t0 = Task()

    def forward(self, x, rank):
        return self.t0(x + rank)


def get_inputs():
    return [torch.rand([4, 4, 2, 2]), torch.rand([4, 4, 2, 2])]


def get_init_inputs():
    return [[], {}]
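A hypothetical smoke test for the module above: in this record `rank` is supplied as a tensor, so the forward is simply p + (x + rank), with the (2, 2) parameter broadcast over the leading dims. p is initialized to ones, hence the +1 below:

import torch

# Hypothetical check, assuming ModuleForDdpCommHook is in scope.
m = ModuleForDdpCommHook()
x, rank = torch.rand(4, 4, 2, 2), torch.rand(4, 4, 2, 2)
assert torch.allclose(m(x, rank), x + rank + 1.0)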
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn import torch.utils.data.distributed import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.cuda import torch.cuda.nccl import torch.backends.cudnn import torch.backends.mkl assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_3, (2, 2), (2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(64)](primals_3, primals_1, primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_3 return buf0, class Task(nn.Module): def __init__(self): super().__init__() self.p = nn.Parameter(torch.ones(2, 2)) def forward(self, x): return self.p + x class ModuleForDdpCommHookNew(nn.Module): def __init__(self): super().__init__() self.t0 = Task() def forward(self, input_0, input_1): primals_3 = self.t0.p primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
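triton_poi_fused_add_0 above collapses both additions into one elementwise pass, broadcasting the flat (2, 2) parameter via the `x0 = xindex % 4` indexing. A hedged restatement of that broadcast:

import torch

# Hypothetical restatement: in_ptr0 is the flat (2, 2) parameter indexed
# modulo 4, i.e. repeated over the leading dims of the (4, 4, 2, 2) inputs.
p = torch.rand(2, 2)
x, rank = torch.rand(4, 4, 2, 2), torch.rand(4, 4, 2, 2)
flat = p.reshape(-1)[torch.arange(64) % 4].reshape(4, 4, 2, 2)
assert torch.equal(flat + (x + rank), p + (x + rank))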
lipovsek/bagua
ModuleForDdpCommHook
false
12,714
[ "MIT" ]
0
d8b03333ab6cf3745279311b9da76e99d5c2c00a
https://github.com/lipovsek/bagua/tree/d8b03333ab6cf3745279311b9da76e99d5c2c00a
RMSEFeaturesLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/rm/crmbqjlvwuv2i5s3eme655nd5recp2ij4cgbsj7qduncqtixqhk4.py # Topologically Sorted Source Nodes: [mse_loss], Original ATen: [aten.mse_loss] # Source node to ATen node mapping: # mse_loss => pow_1, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) triton_per_fused_mse_loss_0 = async_compile.triton('triton_per_fused_mse_loss_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mse_loss_0(in_ptr0, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex 
= tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [mse_loss], Original ATen: [aten.mse_loss] stream0 = get_raw_stream(0) triton_per_fused_mse_loss_0.run(arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data


def rmseOnFeatures(feature_difference):
    gt = torch.zeros_like(feature_difference)
    # reduction='sum' is the modern spelling of the deprecated
    # size_average=False used in the original record; behavior is identical.
    return torch.nn.functional.mse_loss(feature_difference, gt, reduction='sum')


class RMSEFeaturesLoss(nn.Module):

    def __init__(self):
        super(RMSEFeaturesLoss, self).__init__()

    def forward(self, feature_difference):
        return rmseOnFeatures(feature_difference)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
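Despite its name, the helper above returns a summed squared error (no mean and no square root). A hedged check, assuming RMSEFeaturesLoss is in scope:

import torch

# Hypothetical equivalence check: mse_loss against a zero target with
# reduction='sum' is just the sum of squared entries.
diff = torch.rand(4, 4, 4, 4)
loss = RMSEFeaturesLoss()(diff)
assert torch.allclose(loss, (diff ** 2).sum())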
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_0(in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp4, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_mse_loss_0[grid(1)](arg0_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf0, def rmseOnFeatures(feature_difference): gt = torch.zeros_like(feature_difference) return torch.nn.functional.mse_loss(feature_difference, gt, size_average=False) class RMSEFeaturesLossNew(nn.Module): def __init__(self): super(RMSEFeaturesLossNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
liruihui/learning3d
RMSEFeaturesLoss
false
12,715
[ "MIT" ]
0
d513fb0956926f92c185594d4e236d26ecc7e81e
https://github.com/liruihui/learning3d/tree/d513fb0956926f92c185594d4e236d26ecc7e81e
LNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/wq/cwq4v2t2casrvnd4kuih7ltbgx5737ucaocshzecbzd5g64pkqnq.py # Topologically Sorted Source Nodes: [lnn_out], Original ATen: [aten.clone] # Source node to ATen node mapping: # lnn_out => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = 
yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl_math.abs(tmp0) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = libdevice.log1p(tmp3) tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bm/cbmeb2qgbivgi5ugwn3lxt7jt7xsbz2udthbkyvylpkqrid3yapc.py # Topologically Sorted Source Nodes: [lnn_out, lnn_exp, relu], Original ATen: [aten.clone, aten.expm1, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # lnn_exp => expm1 # lnn_out => clone_1 # relu => relu # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%clone_1,), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%expm1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_clone_expm1_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_clone_expm1_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_expm1_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_expm1_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = libdevice.expm1(tmp0) tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = 0.0 tmp5 = tmp3 <= tmp4 tl.store(out_ptr0 + (x2 + (4*y3)), tmp3, xmask & ymask) tl.store(out_ptr1 + (x2 + (4*y3)), tmp5, xmask & ymask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [lnn_out], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [lnn_out], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [lnn_out, lnn_exp, relu], Original ATen: [aten.clone, aten.expm1, aten.relu, aten.threshold_backward] triton_poi_fused_clone_expm1_relu_threshold_backward_1.run(buf1, buf2, buf3, 64, 4, grid=grid(64, 4), stream=stream0) return (reinterpret_tensor(buf2, (16, 16), (16, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.utils.data
import torch.nn.functional as F


class LNN(torch.nn.Module):
    """
    A pytorch implementation of LNN layer
    Input shape
        - A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
    Output shape
        - 2D tensor with shape:``(batch_size,LNN_dim*embedding_size)``.
    Arguments
        - **in_features** : Embedding of feature.
        - **num_fields**: int.The field size of feature.
        - **LNN_dim**: int.The number of Logarithmic neuron.
        - **bias**: bool.Whether or not use bias in LNN.
    """

    def __init__(self, num_fields, embed_dim, LNN_dim, bias=False):
        super(LNN, self).__init__()
        self.num_fields = num_fields
        self.embed_dim = embed_dim
        self.LNN_dim = LNN_dim
        self.lnn_output_dim = LNN_dim * embed_dim
        self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields))
        if bias:
            self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields, embedding_size)``
        """
        embed_x_abs = torch.abs(x)
        embed_x_afn = torch.add(embed_x_abs, 1e-07)
        embed_x_log = torch.log1p(embed_x_afn)
        lnn_out = torch.matmul(self.weight, embed_x_log)
        if self.bias is not None:
            lnn_out += self.bias
        lnn_exp = torch.expm1(lnn_out)
        output = F.relu(lnn_exp).contiguous().view(-1, self.lnn_output_dim)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_fields': 4, 'embed_dim': 4, 'LNN_dim': 4}]
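A hypothetical smoke test for the LNN layer above: matmul broadcasts the (LNN_dim, num_fields) weight over the leading dims, and the final view flattens to (-1, LNN_dim * embed_dim):

import torch

# Hypothetical check, assuming LNN is in scope; with the 4x4x4x4 sample input
# the 256 output elements reshape to (256 / 16, 16).
lnn = LNN(num_fields=4, embed_dim=4, LNN_dim=4)
out = lnn(torch.rand(4, 4, 4, 4))
assert out.shape == (16, 16)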
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Fuses log1p(abs(x) + 1e-07) with a transpose of the last two dims
    # into the contiguous layout the following GEMM expects.
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl_math.abs(tmp0)
    tmp2 = 1e-07
    tmp3 = tmp1 + tmp2
    tmp4 = libdevice.log1p(tmp3)
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)


@triton.jit
def triton_poi_fused_clone_expm1_relu_threshold_backward_1(in_ptr0,
    out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    # Applies relu(expm1(.)) to the GEMM result and also emits the <= 0
    # mask that the autograd backward for relu reuses.
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = libdevice.expm1(tmp0)
    tmp2 = tl.full([1, 1], 0, tl.int32)
    tmp3 = triton_helpers.maximum(tmp2, tmp1)
    tmp4 = 0.0
    tmp5 = tmp3 <= tmp4
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp3, xmask & ymask)
    tl.store(out_ptr1 + (x2 + 4 * y3), tmp5, xmask & ymask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
            XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_clone_expm1_relu_threshold_backward_1[grid(64, 4)](
            buf1, buf2, buf3, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4,
            num_stages=1)
    return reinterpret_tensor(buf2, (16, 16), (16, 1), 0), reinterpret_tensor(
        buf0, (64, 4), (4, 1), 0), buf1, buf3


class LNNNew(torch.nn.Module):
    """
    A PyTorch implementation of the LNN (Logarithmic Neural Network) layer.
    Input shape
        - A 3D tensor with shape: ``(batch_size, field_size, embedding_size)``.
    Output shape
        - 2D tensor with shape: ``(batch_size, LNN_dim * embedding_size)``.
    Arguments
        - **num_fields**: int. The number of feature fields.
        - **embed_dim**: int. The embedding size of each field.
        - **LNN_dim**: int. The number of logarithmic neurons.
        - **bias**: bool. Whether to use a bias term in the LNN.
""" def __init__(self, num_fields, embed_dim, LNN_dim, bias=False): super(LNNNew, self).__init__() self.num_fields = num_fields self.embed_dim = embed_dim self.LNN_dim = LNN_dim self.lnn_output_dim = LNN_dim * embed_dim self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
lipmedusea/pytorch
LNN
false
12,716
[ "MIT" ]
0
5d94694b9e1193a93dd7f75ea2042b5a1cf178bc
https://github.com/lipmedusea/pytorch/tree/5d94694b9e1193a93dd7f75ea2042b5a1cf178bc
ConvNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/4j/c4jl5m5y24onp52mw7qcvibjgpz2yys33yfj3idumydodmt4ojyk.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 254016 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3969) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/kx/ckx3dr2mk7yu7dobko5u5xgzcvgu2sbmviszgl2xoxawvzg7a5hf.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 492032 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3844) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/w3/cw3faaaz7gty2gm7f4xovy33nk7gnjdtullauud6hfrckqbvdjvs.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 
'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 238144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3721) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/a2/ca2lfs3rl4afomtng753d7cmatdcwn3fjki3ped72ucym7wkdbcw.py # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = 
xindex x1 = (xindex // 3600) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 8, 64, 64), (32768, 4096, 64, 1)) assert_size_stride(primals_2, (16, 8, 2, 2), (32, 4, 2, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (32, 16, 2, 2), (64, 4, 2, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (16, 32, 2, 2), (128, 4, 2, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (8, 16, 2, 2), (64, 4, 2, 1)) assert_size_stride(primals_9, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 63, 63), (63504, 3969, 63, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 254016, grid=grid(254016), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 62, 62), (123008, 3844, 62, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 492032, grid=grid(492032), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 61, 61), (59536, 3721, 61, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_7, 238144, grid=grid(238144), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 60, 60), (28800, 3600, 60, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_3.run(buf7, primals_9, 115200, grid=grid(115200), stream=stream0) del primals_9 return (buf7, primals_1, primals_2, primals_4, primals_6, primals_8, buf1, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 8, 64, 64), (32768, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 8, 2, 2), (32, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 16, 2, 2), (64, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 32, 2, 2), (128, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((8, 16, 2, 2), (64, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
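The bias-add kernels above recover the channel index from a flat offset. A worked sketch for triton_poi_fused_convolution_0, where each (63, 63) channel plane of the contiguous (4, 16, 63, 63) output holds 3969 elements:

def channel_of(flat_index, plane=63 * 63, channels=16):
    # mirrors x1 = xindex // 3969 % 16 in the kernel
    return flat_index // plane % channels

assert channel_of(0) == 0
assert channel_of(3969) == 1
assert channel_of(16 * 3969) == 0  # wraps into the next image in the batch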
import torch
import torch.nn
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.cuda
import torch.cuda.nccl
import torch.backends.cudnn
import torch.backends.mkl


class ConvNet(nn.Module):

    def __init__(self, gpus, layouts, dtypes):
        super(ConvNet, self).__init__()
        self.dtypes = dtypes
        if isinstance(gpus, list):
            self.layer_gpus = gpus
        else:
            gpus = [gpus] * 4  # normalize to one entry per layer (unused here)
        self.conv0 = torch.nn.Conv2d(8, 16, (2, 2))
        self.conv1 = torch.nn.Conv2d(16, 32, (2, 2))
        self.conv2 = torch.nn.Conv2d(32, 16, (2, 2))
        self.conv3 = torch.nn.Conv2d(16, 8, (2, 2))

    def forward(self, x):
        # Per-layer device list kept from the original multi-GPU harness; it
        # is unused in this single-device version, so the lookup is a no-op.
        _ = self.layer_gpus if hasattr(self, 'layer_gpus') else [x.device] * 4
        x = self.conv0(x)
        x = self.conv1(x)
        x = self.conv2(x)
        return self.conv3(x)


def get_inputs():
    return [torch.rand([4, 8, 64, 64])]


def get_init_inputs():
    return [[], {'gpus': False, 'layouts': 4, 'dtypes': torch.float32}]
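Each 2x2 convolution with stride 1 and no padding shrinks the spatial size by one pixel, so the 64x64 input leaves the stack at 60x60. A minimal check (a sketch; runs on CPU):

import torch

net = ConvNet(gpus=False, layouts=4, dtypes=torch.float32)
y = net(torch.rand(4, 8, 64, 64))
assert y.shape == (4, 8, 60, 60)  # 64 -> 63 -> 62 -> 61 -> 60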
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn import torch.utils.data.distributed import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.cuda import torch.cuda.nccl import torch.backends.cudnn import torch.backends.mkl assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 254016 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3969 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 492032 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 238144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3721 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 8, 64, 64), (32768, 4096, 64, 1)) assert_size_stride(primals_2, (16, 8, 2, 2), (32, 4, 2, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (32, 16, 2, 2), (64, 4, 2, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (16, 32, 2, 2), (128, 4, 2, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (8, 16, 2, 2), (64, 4, 2, 1)) assert_size_stride(primals_9, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 63, 63), (63504, 3969, 63, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(254016)](buf1, primals_3, 254016, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 62, 62), (123008, 3844, 62, 1)) buf3 = buf2 del buf2 
triton_poi_fused_convolution_1[grid(492032)](buf3, primals_5, 492032, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 61, 61), (59536, 3721, 61, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(238144)](buf5, primals_7, 238144, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 60, 60), (28800, 3600, 60, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_3[grid(115200)](buf7, primals_9, 115200, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 return (buf7, primals_1, primals_2, primals_4, primals_6, primals_8, buf1, buf3, buf5) class ConvNetNew(nn.Module): def __init__(self, gpus, layouts, dtypes): super(ConvNetNew, self).__init__() self.dtypes = dtypes if isinstance(gpus, list): self.layer_gpus = gpus else: gpus = [gpus] * 4 self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)) self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)) self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)) self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)) def forward(self, input_0): primals_2 = self.conv0.weight primals_3 = self.conv0.bias primals_4 = self.conv1.weight primals_5 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_8 = self.conv3.weight primals_9 = self.conv3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
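And a parity check between the eager stack and the compiled wrapper, a sketch that requires a CUDA device:

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = ConvNet(False, 4, torch.float32).cuda()
    fused = ConvNetNew(False, 4, torch.float32).cuda()
    fused.load_state_dict(eager.state_dict())  # same weights and biases
    x = torch.rand(4, 8, 64, 64, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)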
lipovsek/bagua
ConvNet
false
12,717
[ "MIT" ]
0
d8b03333ab6cf3745279311b9da76e99d5c2c00a
https://github.com/lipovsek/bagua/tree/d8b03333ab6cf3745279311b9da76e99d5c2c00a
Task
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/cq/ccqfqpgzxaos3bswguvdn3wt2ewpdl4jaru3enf3c7svmx3j3ar2.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (2, 2), (2, 1)) assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(primals_1, primals_2, buf0, 64, grid=grid(64), stream=stream0) del primals_1 del primals_2 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn import torch.utils.data.distributed import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.cuda import torch.cuda.nccl import torch.backends.cudnn import torch.backends.mkl class Task(nn.Module): def __init__(self): super().__init__() self.p = nn.Parameter(torch.ones(2, 2)) def forward(self, x): return self.p + x def get_inputs(): return [torch.rand([4, 4, 2, 2])] def get_init_inputs(): return [[], {}]
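Task.forward relies on broadcasting: the (2, 2) parameter is expanded against the trailing dims of the (4, 4, 2, 2) input, so the same four values are added to every slice. A minimal check (a sketch):

import torch

t = Task()
x = torch.rand(4, 4, 2, 2)
y = t(x)
assert y.shape == (4, 4, 2, 2)
assert torch.allclose(y[0, 0] - x[0, 0], t.p)  # every (2, 2) slice shifts by p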
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn import torch.utils.data.distributed import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.cuda import torch.cuda.nccl import torch.backends.cudnn import torch.backends.mkl assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (2, 2), (2, 1)) assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(64)](primals_1, primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 return buf0, class TaskNew(nn.Module): def __init__(self): super().__init__() self.p = nn.Parameter(torch.ones(2, 2)) def forward(self, input_0): primals_1 = self.p primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
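In the fused kernel, that broadcast becomes flat-index arithmetic: the contiguous (4, 4, 2, 2) output has 64 elements, and its last two dims together span exactly the 4 elements of the (2, 2) parameter, so output element i reads parameter element i % 4 (the x0 = xindex % 4 line). A worked sketch:

def param_index(flat_index):
    # mirrors x0 = xindex % 4 in triton_poi_fused_add_0
    return flat_index % 4

assert param_index(0) == param_index(4) == param_index(60) == 0
assert param_index(7) == 3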
lipovsek/bagua
Task
false
12,718
[ "MIT" ]
0
d8b03333ab6cf3745279311b9da76e99d5c2c00a
https://github.com/lipovsek/bagua/tree/d8b03333ab6cf3745279311b9da76e99d5c2c00a
EncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # z => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # z => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uq/cuq3l3e5tz3kz7sbro3jn6wnkgn4cskirh6kyc2yt5ejf4nvbq5p.py # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn => exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_9, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = 
tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp0 * tmp5 tmp7 = tmp6 * tmp3 tmp8 = triton_helpers.maximum(tmp4, tmp7) tmp10 = tmp0 * tmp9 tmp11 = tmp10 * tmp3 tmp12 = triton_helpers.maximum(tmp8, tmp11) tmp14 = tmp0 * tmp13 tmp15 = tmp14 * tmp3 tmp16 = triton_helpers.maximum(tmp12, tmp15) tmp17 = tmp4 - tmp16 tmp18 = tmp17 * tmp3 tmp19 = tl_math.exp(tmp18) tmp20 = tmp7 - tmp16 tmp21 = tmp20 * tmp3 tmp22 = tl_math.exp(tmp21) tmp23 = tmp19 + tmp22 tmp24 = tmp11 - tmp16 tmp25 = tmp24 * tmp3 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp15 - tmp16 tmp29 = tmp28 * tmp3 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tl.store(out_ptr0 + (x2), tmp16, xmask) tl.store(out_ptr1 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/g6/cg6sum6t4wd6e5aoymddynswsuo7ca7h72owwdamt362sig44bv3.py # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn => div, exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_9, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused__softmax_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp3 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + (x4), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/lc/clc6qc6q57vscjt6xptfqnhjvnfsnxc5d6kouoflkvjdpfd7zuu3.py # Topologically Sorted Source Nodes: [a], Original ATen: [aten.clone] # Source node to ATen node mapping: # a => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_14,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zq/czqeiybdb6mlnwo4hmrayt3c44g7hbps2ftgdd7x2mv3sr2mwjbn.py # Topologically Sorted Source Nodes: [a, x_8, z_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # a => add_2 # x_8 => add_3 # z_1 => var_mean_1 # Graph 
fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_16, %primals_11), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - 
tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6l/c6l5kzogqt6qgowb3zqvwwwqezmjn5mmwq5w672exeszre3xha3f.py # Topologically Sorted Source Nodes: [a, x_8, z_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # a => add_2 # x_8 => add_3 # z_1 => add_4, add_5, mul_4, mul_5, rsqrt_1, sub_2 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_16, %primals_11), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_12), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_13), kwargs = {}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, 
eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/c3/cc3chnm2jhwvm7bnvjd72vechcjm6nnwiwrdrtx3myjitgefk3he.py # Topologically Sorted Source Nodes: [g], Original ATen: [aten.elu] # Source node to ATen node mapping: # g => expm1, gt, mul_6, mul_8, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_18, 0), kwargs = {}) # %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_18, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_6,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul_6, %mul_8), kwargs = {}) triton_poi_fused_elu_7 = async_compile.triton('triton_poi_fused_elu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/fe/cfekjdsthatxjbbhgpigh2n2waatgzwuthjkuqadgzag4jvzvepw.py # Topologically Sorted Source Nodes: [a, x_8, x_10], Original ATen: [aten.add] # Source node to ATen node mapping: # a => add_2 # x_10 => add_6 # x_8 => add_3 # Graph fragment: # %add_2 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%view_16, %primals_11), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_20), kwargs = {}) triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4, ), (1, )) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_9 buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, buf6, buf7, 64, grid=grid(64), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf3, buf4, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (1, 64, 16), 0), reinterpret_tensor(buf5, (16, 4, 1), (1, 16, 0), 0), out=buf9) buf10 = reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [a], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf9, buf10, 4, 16, grid=grid(4, 16), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [a], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf11) buf12 = buf1; del buf1 # reuse buf13 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [a, x_8, z_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(primals_3, buf11, primals_11, buf12, buf13, 16, grid=grid(16), stream=stream0) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [a, x_8, z_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(primals_3, buf11, primals_11, buf12, buf13, primals_12, primals_13, buf14, 64, grid=grid(64), stream=stream0) del buf12 del buf13 del 
primals_13 buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_15, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del primals_15 buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [g], Original ATen: [aten.elu] triton_poi_fused_elu_7.run(buf15, buf16, 64, grid=grid(64), stream=stream0) buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0); del buf17 # reuse # Topologically Sorted Source Nodes: [a, x_8, x_10], Original ATen: [aten.add] triton_poi_fused_add_8.run(buf18, primals_3, buf11, primals_11, primals_17, 64, grid=grid(64), stream=stream0) del primals_17 return (buf18, buf8, primals_3, primals_11, primals_12, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf3, buf4, buf8, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), primals_16, primals_14, primals_10, reinterpret_tensor(buf5, (16, 1, 4), (1, 1, 16), 0), primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
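# Note (added for orientation; inferred from the graph above): in call()'s
# return tuple, buf18 is the encoder layer output (the input plus attention
# and feed-forward residuals) and buf8 is the softmaxed attention map; the
# remaining tensors are saved for the backward pass.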
import math
import torch
import torch.nn as nn
from typing import Optional
from typing import List


class FeedForward(nn.Module):
    """
    ## FFN module
    """

    def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
        activation=nn.ReLU(), is_gated: 'bool'=False, bias: 'bool'=True,
        bias_gate: 'bool'=True):
        """
        * d_model is the number of features
        * d_ff is the number of features in the hidden layer of the FFN
        * dropout is the dropout probability for the hidden layer
        * is_gated specifies whether the hidden layer is gated
        * bias specifies whether the two fully connected layers should have a learnable bias
        * bias_gate specifies whether the fully connected layer for the gate should have a learnable bias
        """
        super(FeedForward, self).__init__()
        self.layer1 = nn.Linear(d_model, d_ff, bias=bias)
        self.layer2 = nn.Linear(d_ff, d_model, bias=bias)
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
        self.is_gated = is_gated
        if is_gated:
            self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)

    def forward(self, x: 'torch.Tensor'):
        g = self.activation(self.layer1(x))
        if self.is_gated:
            x = g * self.linear_v(x)
        else:
            x = g
        x = self.dropout(x)
        return self.layer2(x)


class PrepareForMultiHeadAttention(nn.Module):
    """
    ## Prepare for multi-head attention

    This linear transform maps the query, key, and value vectors into the
    same lower-dimensional space.
    This module does a linear transformation and splits the vector into given
    number of heads for multi-head attention.
    This is used to transform **key**, **query**, and **value** vectors.
    """

    def __init__(self, d_model: 'int', heads: 'int', d_k: 'int', bias: 'bool'):
        super(PrepareForMultiHeadAttention, self).__init__()
        self.linear = nn.Linear(d_model, heads * d_k, bias=bias)
        self.heads = heads
        self.d_k = d_k

    def forward(self, x: 'torch.Tensor'):
        head_shape = x.shape[:-1]
        x = self.linear(x)
        x = x.view(*head_shape, self.heads, self.d_k)
        return x


class MultiHeadAttention(nn.Module):
    """
    This computes scaled multi-headed attention for given query, key and value vectors.
    It computes the similarity between query and key, uses it as attention
    weights, and combines the values accordingly.
    It uses dot-product of query and key as the indicator of how matching they are.
    Before taking the $softmax$ the dot-products are scaled by $\\frac{1}{\\sqrt{d_k}}$.
    This is done to avoid large dot-product values causing softmax to
    give very small gradients when $d_k$ is large.
    Softmax is calculated along the axis of the sequence (or time).
    """

    def __init__(self, heads: 'int', d_model: 'int', dropout_prob: 'float'=
        0.1, bias: 'bool'=True):
        """
        * heads is the number of heads.
        * d_model is the number of features in the query, key and value vectors.
""" super(MultiHeadAttention, self).__init__() self.d_k = d_model // heads self.heads = heads self.query = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=bias) self.key = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=bias) self.value = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=True) self.softmax = nn.Softmax(dim=1) self.output = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(dropout_prob) self.scale = 1 / math.sqrt(self.d_k) self.attn = None def get_scores(self, query: 'torch.Tensor', key: 'torch.Tensor'): """ ### Calculate scores between queries and keys,使用的是点积的方法 还可以有cosine,MLP等计算相似度的方法 """ return torch.einsum('ibhd,jbhd->ijbh', query, key) def prepare_mask(self, mask: 'torch.Tensor', query_shape: 'List[int]', key_shape: 'List[int]'): """ mask has shape [seq_len_q, seq_len_k, batch_size], where first dimension is the query dimension. If the query dimension is equal to $1$ it will be broadcasted. """ assert mask.shape[0] == 1 or mask.shape[0] == query_shape[0] assert mask.shape[1] == key_shape[0] assert mask.shape[2] == 1 or mask.shape[2] == query_shape[1] mask = mask.unsqueeze(-1) return mask def forward(self, *, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[torch.Tensor]=None): seq_len, batch_size, _ = query.shape if mask is not None: mask = self.prepare_mask(mask, query.shape, key.shape) query = self.query(query) key = self.key(key) value = self.value(value) scores = self.get_scores(query, key) scores = scores * self.scale if mask is not None: scores = scores.masked_fill(mask == 0, float('-inf')) attn = self.softmax(scores) attn = self.dropout(attn) x = torch.einsum('ijbh,jbhd->ibhd', attn, value) self.attn = attn.detach() x = x.reshape(seq_len, batch_size, -1) return self.output(x) class EncoderLayer(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', heads: 'int', bias: 'bool'=True, is_gated: 'bool'=False, bias_gate: 'bool'=True, activation=nn.ELU(), dropout_prob: 'float'=0.1): super(EncoderLayer, self).__init__() self.attn = MultiHeadAttention(heads, d_model, dropout_prob, bias) self.feed_forward = FeedForward(d_model, d_ff, dropout_prob, activation, is_gated, bias, bias_gate) self.dropout = nn.Dropout(dropout_prob) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) def forward(self, x): z = self.norm1(x) a = self.attn(query=z, key=z, value=z) x = x + self.dropout(a) z = self.norm2(x) a = self.feed_forward(z) x = x + self.dropout(a) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4, 'heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn from typing import Optional from typing import List assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp0 * tmp5 tmp7 = tmp6 * tmp3 tmp8 = triton_helpers.maximum(tmp4, tmp7) tmp10 = tmp0 * tmp9 tmp11 = tmp10 * tmp3 tmp12 = triton_helpers.maximum(tmp8, tmp11) tmp14 = tmp0 * tmp13 tmp15 = tmp14 * tmp3 tmp16 = triton_helpers.maximum(tmp12, tmp15) tmp17 = tmp4 - tmp16 tmp18 = tmp17 * tmp3 tmp19 = tl_math.exp(tmp18) tmp20 = tmp7 - tmp16 tmp21 = tmp20 * tmp3 tmp22 = tl_math.exp(tmp21) tmp23 = tmp19 + tmp22 tmp24 = tmp11 - tmp16 tmp25 = tmp24 * tmp3 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp15 - tmp16 tmp29 = tmp28 * tmp3 
tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr3 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp3 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + x4, tmp10, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 
+ x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_elu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, 
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf2, (16, 4), (
            4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf5)
        del primals_9
        buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(64)](buf3, buf4, buf6, buf7, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_3[grid(256)](buf3, buf4, buf6, buf7,
            buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf9 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0)
        del buf7
        extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (1, 64, 16),
            0), reinterpret_tensor(buf5, (16, 4, 1), (1, 16, 0), 0), out=buf9)
        buf10 = reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0)
        del buf6
        triton_poi_fused_clone_4[grid(4, 16)](buf9, buf10, 4, 16, XBLOCK=16,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
        del buf9
        extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf11)
        buf12 = buf1
        del buf1
        buf13 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3,
            buf11, primals_11, buf12, buf13, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_3,
            buf11, primals_11, buf12, buf13, primals_12, primals_13, buf14,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf12
        del buf13
        del primals_13
        buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_15, reinterpret_tensor(buf14, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf15)
        del primals_15
        buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_elu_7[grid(64)](buf15, buf16, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf17)
        buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0)
        del buf17
        triton_poi_fused_add_8[grid(64)](buf18, primals_3, buf11,
            primals_11, primals_17, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_17
    return buf18, buf8, primals_3, primals_11, primals_12, reinterpret_tensor(
        buf2, (16, 4), (4, 1), 0), buf3, buf4, buf8, reinterpret_tensor(buf10,
        (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4), (4,
        1), 0), buf15, reinterpret_tensor(buf16, (16, 4), (4, 1), 0
        ), primals_16, primals_14, primals_10, reinterpret_tensor(buf5, (16,
        1, 4), (1, 1, 16), 0), primals_8, primals_6, primals_4


class FeedForward(nn.Module):
    """
    ## FFN module
    """

    def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
        activation=nn.ReLU(), is_gated: 'bool'=False, bias: 'bool'=True,
        bias_gate: 'bool'=True):
        """
        * d_model is the number of features
        * d_ff is the number of features in the hidden layer of the FFN
        * dropout is the dropout probability for the hidden layer
        * is_gated specifies whether the hidden layer is gated
        * bias specifies whether the two fully connected layers should have a learnable bias
        * bias_gate specifies whether the fully connected layer for the gate should have a learnable bias
        """
        super(FeedForward, self).__init__()
        self.layer1 = nn.Linear(d_model, d_ff, bias=bias)
        self.layer2 = nn.Linear(d_ff, d_model, bias=bias)
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
        self.is_gated = is_gated
        if is_gated:
            self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)

    def forward(self, x: 'torch.Tensor'):
        g = self.activation(self.layer1(x))
        if self.is_gated:
            x = g * self.linear_v(x)
        else:
            x = g
        x = self.dropout(x)
        return self.layer2(x)


class PrepareForMultiHeadAttention(nn.Module):
    """
    ## Prepare for multi-head attention

    This linear transform maps the query, key, and value vectors into the
    same lower-dimensional space.
    This module does a linear transformation and splits the vector into given
    number of heads for multi-head attention.
    This is used to transform **key**, **query**, and **value** vectors.
    """

    def __init__(self, d_model: 'int', heads: 'int', d_k: 'int', bias: 'bool'):
        super(PrepareForMultiHeadAttention, self).__init__()
        self.linear = nn.Linear(d_model, heads * d_k, bias=bias)
        self.heads = heads
        self.d_k = d_k

    def forward(self, x: 'torch.Tensor'):
        head_shape = x.shape[:-1]
        x = self.linear(x)
        x = x.view(*head_shape, self.heads, self.d_k)
        return x


class MultiHeadAttention(nn.Module):
    """
    This computes scaled multi-headed attention for given query, key and value vectors.
    It computes the similarity between query and key, uses it as attention
    weights, and combines the values accordingly.
    It uses dot-product of query and key as the indicator of how matching they are.
    Before taking the $softmax$ the dot-products are scaled by $\\frac{1}{\\sqrt{d_k}}$.
    This is done to avoid large dot-product values causing softmax to
    give very small gradients when $d_k$ is large.
    Softmax is calculated along the axis of the sequence (or time).
    """

    def __init__(self, heads: 'int', d_model: 'int', dropout_prob: 'float'=
        0.1, bias: 'bool'=True):
        """
        * heads is the number of heads.
        * d_model is the number of features in the query, key and value vectors.
        """
        super(MultiHeadAttention, self).__init__()
        self.d_k = d_model // heads
        self.heads = heads
        self.query = PrepareForMultiHeadAttention(d_model, heads, self.d_k,
            bias=bias)
        self.key = PrepareForMultiHeadAttention(d_model, heads, self.d_k,
            bias=bias)
        self.value = PrepareForMultiHeadAttention(d_model, heads, self.d_k,
            bias=True)
        self.softmax = nn.Softmax(dim=1)
        self.output = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(dropout_prob)
        self.scale = 1 / math.sqrt(self.d_k)
        self.attn = None

    def get_scores(self, query: 'torch.Tensor', key: 'torch.Tensor'):
        """
        ### Calculate scores between queries and keys using the dot product.
        Similarity could also be computed with cosine similarity, an MLP, etc.
        """
        return torch.einsum('ibhd,jbhd->ijbh', query, key)

    def prepare_mask(self, mask: 'torch.Tensor', query_shape: 'List[int]',
        key_shape: 'List[int]'):
        """
        mask has shape [seq_len_q, seq_len_k, batch_size], where the first
        dimension is the query dimension. If the query dimension is equal
        to $1$ it will be broadcasted.
""" assert mask.shape[0] == 1 or mask.shape[0] == query_shape[0] assert mask.shape[1] == key_shape[0] assert mask.shape[2] == 1 or mask.shape[2] == query_shape[1] mask = mask.unsqueeze(-1) return mask def forward(self, *, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[torch.Tensor]=None): seq_len, batch_size, _ = query.shape if mask is not None: mask = self.prepare_mask(mask, query.shape, key.shape) query = self.query(query) key = self.key(key) value = self.value(value) scores = self.get_scores(query, key) scores = scores * self.scale if mask is not None: scores = scores.masked_fill(mask == 0, float('-inf')) attn = self.softmax(scores) attn = self.dropout(attn) x = torch.einsum('ijbh,jbhd->ibhd', attn, value) self.attn = attn.detach() x = x.reshape(seq_len, batch_size, -1) return self.output(x) class EncoderLayerNew(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', heads: 'int', bias: 'bool'=True, is_gated: 'bool'=False, bias_gate: 'bool'=True, activation=nn.ELU(), dropout_prob: 'float'=0.1): super(EncoderLayerNew, self).__init__() self.attn = MultiHeadAttention(heads, d_model, dropout_prob, bias) self.feed_forward = FeedForward(d_model, d_ff, dropout_prob, activation, is_gated, bias, bias_gate) self.dropout = nn.Dropout(dropout_prob) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) def forward(self, input_0): primals_4 = self.attn.query.linear.weight primals_1 = self.attn.query.linear.bias primals_6 = self.attn.key.linear.weight primals_2 = self.attn.key.linear.bias primals_8 = self.attn.value.linear.weight primals_5 = self.attn.value.linear.bias primals_10 = self.attn.output.weight primals_7 = self.attn.output.bias primals_14 = self.feed_forward.layer1.weight primals_9 = self.feed_forward.layer1.bias primals_16 = self.feed_forward.layer2.weight primals_11 = self.feed_forward.layer2.bias primals_12 = self.norm1.weight primals_13 = self.norm1.bias primals_15 = self.norm2.weight primals_17 = self.norm2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
jamesYu365/Transfomer-example
EncoderLayer
false
12,720
[ "MIT" ]
0
a867f72f539de9746668da411f524dab45ddf12f
https://github.com/jamesYu365/Transfomer-example/tree/a867f72f539de9746668da411f524dab45ddf12f
AGRUCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/by/cbypqeb7lfdlbth5j2ww7h2bluyiqc2nrbnc76btfynxntusq5wb.py # Topologically Sorted Source Nodes: [add, reset_gate, mul, add_1, new_state, sub, mul_1, mul_2, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub, aten.tanh_backward] # Source node to ATen node mapping: # add => add # add_1 => add_1 # hy => add_2 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # new_state => tanh # reset_gate => sigmoid # sub => sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %getitem_3), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem_5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, %mul), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %view), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_6), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %tanh), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_4), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask) tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (x2), xmask) tmp11 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp10 = tmp8 * tmp9 tmp13 = tmp11 + tmp12 tmp15 = tmp5 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp18 = tmp6 * tmp17 tmp19 = tmp10 + tmp18 tmp20 = tmp17 * tmp17 tmp21 = tmp7 - tmp20 tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp19, xmask) tl.store(out_ptr2 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [gh], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
# Topologically Sorted Source Nodes: [add, reset_gate, mul, add_1, new_state, sub, mul_1, mul_2, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub, aten.tanh_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0.run(buf0, primals_2, buf1, primals_7, primals_6, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0) del buf0 del primals_2 return (buf3, primals_3, primals_6, primals_7, reinterpret_tensor(buf1, (16, 4), (12, 1), 8), buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
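# Note (added for orientation; traceable in the kernel above): the single
# fused kernel writes three outputs -- buf2 is the reset gate (sigmoid),
# buf3 is the new hidden state hy returned to the caller, and buf4 is
# 1 - tanh(.)^2, stashed for the tanh backward pass.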
import torch import torch.nn as nn import torch.nn.functional as F from sklearn.metrics import * class AGRUCell(nn.Module): """ Attention based GRU (AGRU) Reference: - Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018. """ def __init__(self, input_size, hidden_size, bias=True): super(AGRUCell, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size) ) self.register_parameter('weight_ih', self.weight_ih) self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size, hidden_size)) self.register_parameter('weight_hh', self.weight_hh) if bias: self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size)) self.register_parameter('bias_ih', self.bias_ih) self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size)) self.register_parameter('bias_hh', self.bias_hh) for tensor in [self.bias_ih, self.bias_hh]: nn.init.zeros_(tensor) else: self.register_parameter('bias_ih', None) self.register_parameter('bias_hh', None) def forward(self, input, hx, att_score): gi = F.linear(input, self.weight_ih, self.bias_ih) gh = F.linear(hx, self.weight_hh, self.bias_hh) i_r, _i_z, i_n = gi.chunk(3, 1) h_r, _h_z, h_n = gh.chunk(3, 1) reset_gate = torch.sigmoid(i_r + h_r) new_state = torch.tanh(i_n + reset_gate * h_n) att_score = att_score.view(-1, 1) hy = (1.0 - att_score) * hx + att_score * new_state return hy def get_inputs(): return [torch.rand([16, 4]), torch.rand([16, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
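# --- Added usage sketch (illustrative, not part of the original module) ---
# AGRU swaps the GRU update gate for an externally supplied attention score:
# hy = (1 - att_score) * hx + att_score * new_state, per row. The original
# module never initializes weight_ih/weight_hh (only the biases), so an
# explicit init is added here for a meaningful forward pass.
if __name__ == '__main__':
    cell = AGRUCell(input_size=4, hidden_size=4)
    nn.init.xavier_uniform_(cell.weight_ih)
    nn.init.xavier_uniform_(cell.weight_hh)
    x, hx, att_score = get_inputs()
    hy = cell(x, hx, att_score)
    assert hy.shape == hx.shape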
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from sklearn.metrics import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask) tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x2, xmask) tmp11 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp10 = tmp8 * tmp9 tmp13 = tmp11 + tmp12 tmp15 = tmp5 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp18 = tmp6 * tmp17 tmp19 = tmp10 + tmp18 tmp20 = tmp17 * tmp17 tmp21 = tmp7 - tmp20 tl.store(out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr1 + x2, tmp19, xmask) tl.store(out_ptr2 + x2, tmp21, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor( primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0[grid(64)]( buf0, primals_2, buf1, primals_7, primals_6, buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del primals_2 return buf3, primals_3, primals_6, primals_7, reinterpret_tensor(buf1, (16, 4), (12, 1), 8), buf2, buf4 class AGRUCellNew(nn.Module): """ Attention based GRU (AGRU) Reference: - Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018. 
""" def __init__(self, input_size, hidden_size, bias=True): super(AGRUCellNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size) ) self.register_parameter('weight_ih', self.weight_ih) self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size, hidden_size)) self.register_parameter('weight_hh', self.weight_hh) if bias: self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size)) self.register_parameter('bias_ih', self.bias_ih) self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size)) self.register_parameter('bias_hh', self.bias_hh) for tensor in [self.bias_ih, self.bias_hh]: nn.init.zeros_(tensor) else: self.register_parameter('bias_ih', None) self.register_parameter('bias_hh', None) def forward(self, input_0, input_1, input_2): primals_1 = self.weight_ih primals_4 = self.weight_hh primals_2 = self.bias_ih primals_5 = self.bias_hh primals_3 = input_0 primals_6 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
liyunrui/DeepCTR-Torch
AGRUCell
false
12,721
[ "Apache-2.0" ]
0
392fd6d39d9ca0ac854022136cdb4d5c68e3a592
https://github.com/liyunrui/DeepCTR-Torch/tree/392fd6d39d9ca0ac854022136cdb4d5c68e3a592
CosineBasisLinear
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/vm/cvmxb4fm3754gb5xbvn445attxbnlnqmgxmnql4g235hqqjxgk4t.py
# Topologically Sorted Source Nodes: [arange, i_pi, mul_1, embedding], Original ATen: [aten.arange, aten.mul, aten.cos]
# Source node to ATen node mapping:
#   arange => add, convert_element_type, iota, mul
#   embedding => cos
#   i_pi => mul_1
#   mul_1 => mul_2
# Graph fragment:
#   %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {})
#   %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 3.141592653589793), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %mul_1), kwargs = {})
#   %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%mul_2,), kwargs = {})
triton_poi_fused_arange_cos_mul_0 = async_compile.triton('triton_poi_fused_arange_cos_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_cos_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_cos_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp1 = 1 + x0
    tmp2 = tmp1.to(tl.float32)
    tmp3 = 3.141592653589793
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 * tmp4
    tmp6 = tl_math.cos(tmp5)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [arange, i_pi, mul_1, embedding], Original ATen: [aten.arange, aten.mul, aten.cos]
        stream0 = get_raw_stream(0)
        triton_poi_fused_arange_cos_mul_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (256, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_2
        del primals_3
    return (reinterpret_tensor(buf1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf0, (256, 4), (4, 1), 0), )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
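For readers decoding the fused kernel above: each of the 1024 output elements corresponds to one (input element, basis function) pair, with x1 = xindex // 4 selecting the flattened input scalar and x0 = xindex % 4 the basis index, so the basis dimension becomes the fastest-varying axis of buf0. A minimal eager-mode sketch of the same computation (the helper name is illustrative, not from the generated code):

import math
import torch

def fused_cos_embedding_reference(x, n_basis=4):
    # Eager equivalent of triton_poi_fused_arange_cos_mul_0:
    # out[..., k] = cos(x * (k + 1) * pi) for k in range(n_basis).
    i_pi = torch.arange(1, n_basis + 1, dtype=torch.float32) * math.pi
    return torch.cos(x.unsqueeze(-1) * i_pi)

x = torch.rand(4, 4, 4, 4)
ref = fused_cos_embedding_reference(x)
assert ref.shape == (4, 4, 4, 4, 4)  # matches buf0's logical shape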
import torch
import numpy as np
from torch import nn


def cosine_basis_functions(x, n_basis_functions=64):
    """Cosine basis functions used to embed quantile thresholds.

    Args:
        x (torch.Tensor): Input.
        n_basis_functions (int): Number of cosine basis functions.

    Returns:
        torch.Tensor: Embedding with shape of (x.shape + (n_basis_functions,)).
    """
    i_pi = torch.arange(1, n_basis_functions + 1, dtype=torch.float,
        device=x.device) * np.pi
    embedding = torch.cos(x[..., None] * i_pi)
    assert embedding.shape == x.shape + (n_basis_functions,)
    return embedding


class CosineBasisLinear(nn.Module):
    """Linear layer following cosine basis functions.

    Args:
        n_basis_functions (int): Number of cosine basis functions.
        out_size (int): Output size.
    """

    def __init__(self, n_basis_functions, out_size):
        super().__init__()
        self.linear = nn.Linear(n_basis_functions, out_size)
        self.n_basis_functions = n_basis_functions
        self.out_size = out_size

    def forward(self, x):
        """Evaluate.

        Args:
            x (torch.Tensor): Input.

        Returns:
            torch.Tensor: Output with shape of (x.shape + (out_size,)).
        """
        h = cosine_basis_functions(x, self.n_basis_functions)
        h = h.reshape(-1, self.n_basis_functions)
        out = self.linear(h)
        out = out.reshape(*x.shape, self.out_size)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_basis_functions': 4, 'out_size': 4}]
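A minimal smoke test of the eager module above, using the shapes from get_inputs/get_init_inputs (runs on CPU, no Inductor involved):

import torch

# Embed a batch of values with 4 cosine basis functions, project to size 4.
layer = CosineBasisLinear(n_basis_functions=4, out_size=4)
x = torch.rand(4, 4, 4, 4)
out = layer(x)
# The layer appends one trailing dimension of size out_size.
assert out.shape == (4, 4, 4, 4, 4)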
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_arange_cos_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 1 + x0
    tmp2 = tmp1.to(tl.float32)
    tmp3 = 3.141592653589793
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 * tmp4
    tmp6 = tl_math.cos(tmp5)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_arange_cos_mul_0[grid(1024)](primals_1, buf0,
            1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (256, 4),
            (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf1)
        del primals_2
        del primals_3
    return reinterpret_tensor(buf1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
        ), reinterpret_tensor(buf0, (256, 4), (4, 1), 0)


def cosine_basis_functions(x, n_basis_functions=64):
    """Cosine basis functions used to embed quantile thresholds.

    Args:
        x (torch.Tensor): Input.
        n_basis_functions (int): Number of cosine basis functions.

    Returns:
        torch.Tensor: Embedding with shape of (x.shape + (n_basis_functions,)).
    """
    i_pi = torch.arange(1, n_basis_functions + 1, dtype=torch.float,
        device=x.device) * np.pi
    embedding = torch.cos(x[..., None] * i_pi)
    assert embedding.shape == x.shape + (n_basis_functions,)
    return embedding


class CosineBasisLinearNew(nn.Module):
    """Linear layer following cosine basis functions.

    Args:
        n_basis_functions (int): Number of cosine basis functions.
        out_size (int): Output size.
    """

    def __init__(self, n_basis_functions, out_size):
        super().__init__()
        self.linear = nn.Linear(n_basis_functions, out_size)
        self.n_basis_functions = n_basis_functions
        self.out_size = out_size

    def forward(self, input_0):
        primals_2 = self.linear.weight
        primals_3 = self.linear.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
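A hedged way to sanity-check the compiled path against the eager module (this sketch assumes both class definitions above are importable side by side and that a CUDA device is available, since call() pins its buffers to cuda:0):

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = CosineBasisLinear(4, 4).cuda()
    compiled = CosineBasisLinearNew(4, 4).cuda()
    # Share parameters so both paths compute the same function.
    compiled.linear.load_state_dict(eager.linear.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-5, atol=1e-5)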
lin826/pfrl
CosineBasisLinear
false
12722
[ "MIT" ]
0
62d7f13b854f1879211a386fd870a7db982cc8ec
https://github.com/lin826/pfrl/tree/62d7f13b854f1879211a386fd870a7db982cc8ec
FM
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/5v/c5vqsotjiyyydjenxs3ttprtusysjuigcmpcuavw4a5cbsh4movc.py
# Topologically Sorted Source Nodes: [sum_1, square_of_sum, mul, sum_of_square, cross_term, sum_3, cross_term_1], Original ATen: [aten.sum, aten.pow, aten.mul, aten.sub]
# Source node to ATen node mapping:
#   cross_term => sub
#   cross_term_1 => mul_1
#   mul => mul
#   square_of_sum => pow_1
#   sum_1 => sum_1
#   sum_3 => sum_3
#   sum_of_square => sum_2
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1], True), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, %sum_2), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [2]), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 0.5), kwargs = {})
triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
    tmp16 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
    tmp17 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
    tmp19 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
    tmp21 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask)
    tmp33 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask)
    tmp34 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask)
    tmp36 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask)
    tmp38 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask)
    tmp50 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask)
    tmp51 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask)
    tmp53 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask)
    tmp55 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp0 * tmp0
    tmp9 = tmp1 * tmp1
    tmp10 = tmp8 + tmp9
    tmp11 = tmp3 * tmp3
    tmp12 = tmp10 + tmp11
    tmp13 = tmp5 * tmp5
    tmp14 = tmp12 + tmp13
    tmp15 = tmp7 - tmp14
    tmp18 = tmp16 + tmp17
    tmp20 = tmp18 + tmp19
    tmp22 = tmp20 + tmp21
    tmp23 = tmp22 * tmp22
    tmp24 = tmp16 * tmp16
    tmp25 = tmp17 * tmp17
    tmp26 = tmp24 + tmp25
    tmp27 = tmp19 * tmp19
    tmp28 = tmp26 + tmp27
    tmp29 = tmp21 * tmp21
    tmp30 = tmp28 + tmp29
    tmp31 = tmp23 - tmp30
    tmp32 = tmp15 + tmp31
    tmp35 = tmp33 + tmp34
    tmp37 = tmp35 + tmp36
    tmp39 = tmp37 + tmp38
    tmp40 = tmp39 * tmp39
    tmp41 = tmp33 * tmp33
    tmp42 = tmp34 * tmp34
    tmp43 = tmp41 + tmp42
    tmp44 = tmp36 * tmp36
    tmp45 = tmp43 + tmp44
    tmp46 = tmp38 * tmp38
    tmp47 = tmp45 + tmp46
    tmp48 = tmp40 - tmp47
    tmp49 = tmp32 + tmp48
    tmp52 = tmp50 + tmp51
    tmp54 = tmp52 + tmp53
    tmp56 = tmp54 + tmp55
    tmp57 = tmp56 * tmp56
    tmp58 = tmp50 * tmp50
    tmp59 = tmp51 * tmp51
    tmp60 = tmp58 + tmp59
    tmp61 = tmp53 * tmp53
    tmp62 = tmp60 + tmp61
    tmp63 = tmp55 * tmp55
    tmp64 = tmp62 + tmp63
    tmp65 = tmp57 - tmp64
    tmp66 = tmp49 + tmp65
    tmp67 = 0.5
    tmp68 = tmp66 * tmp67
    tl.store(in_out_ptr0 + (x2), tmp68, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [sum_1, square_of_sum, mul, sum_of_square, cross_term, sum_3, cross_term_1], Original ATen: [aten.sum, aten.pow, aten.mul, aten.sub]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_pow_sub_sum_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0)
        del arg0_1
    return (buf1, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
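For orientation: with the fixed (4, 4, 4, 4) test input, the compiler specializes the reduction completely, unrolling the four field slices (offsets 0/16/32/48 within each batch) into sixteen scalar loads per output element and emitting the square-of-sum minus sum-of-square arithmetic inline. A hedged eager-mode rendering of what the kernel computes as a whole (the helper name is illustrative):

import torch

def kernel_reference(x):
    # Eager equivalent of triton_poi_fused_mul_pow_sub_sum_0 for a
    # (4, 4, 4, 4) input: square-of-sum minus sum-of-square over the
    # field dimension (dim=1), summed over dim=2 and halved.
    s = x.sum(dim=1, keepdim=True)
    cross = s * s - (x * x).sum(dim=1, keepdim=True)
    return 0.5 * cross.sum(dim=2)

x = torch.rand(4, 4, 4, 4)
ref = kernel_reference(x)
assert ref.shape == (4, 1, 4)  # matches buf1's logical shape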
import torch
import torch.nn as nn
from sklearn.metrics import *


class FM(nn.Module):
    """Factorization Machine models pairwise (order-2) feature interactions
    without linear term and bias.

    Input shape
        - 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.

    Output shape
        - 2D tensor with shape: ``(batch_size, 1)``.

    References
        - [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
    """

    def __init__(self):
        super(FM, self).__init__()

    def forward(self, inputs):
        fm_input = inputs
        square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)
        sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)
        cross_term = square_of_sum - sum_of_square
        cross_term = 0.5 * torch.sum(cross_term, dim=2, keepdim=False)
        return cross_term


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
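The forward pass relies on the standard FM identity: for vectors x_1, ..., x_F, the pairwise interaction sum_{i<j} x_i * x_j equals 0.5 * ((sum_i x_i)^2 - sum_i x_i^2), turning an O(F^2) double loop into two O(F) sums. A small sketch verifying the shortcut against the brute-force pairwise computation (the helper name is illustrative):

import torch

def brute_force_cross_term(x):
    # Explicit O(F^2) pairwise interactions: sum over i < j of x_i * x_j,
    # then sum out the embedding dimension, matching FM.forward's output.
    b, f, e = x.shape
    total = torch.zeros(b, e)
    for i in range(f):
        for j in range(i + 1, f):
            total += x[:, i] * x[:, j]
    return total.sum(dim=1, keepdim=True)

x = torch.rand(8, 4, 4)  # (batch_size, field_size, embedding_size)
torch.testing.assert_close(FM()(x), brute_force_cross_term(x))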
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from sklearn.metrics import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
    tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
    tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
    tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
    tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
    tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
    tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
    tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
    tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
    tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
    tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
    tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp0 * tmp0
    tmp9 = tmp1 * tmp1
    tmp10 = tmp8 + tmp9
    tmp11 = tmp3 * tmp3
    tmp12 = tmp10 + tmp11
    tmp13 = tmp5 * tmp5
    tmp14 = tmp12 + tmp13
    tmp15 = tmp7 - tmp14
    tmp18 = tmp16 + tmp17
    tmp20 = tmp18 + tmp19
    tmp22 = tmp20 + tmp21
    tmp23 = tmp22 * tmp22
    tmp24 = tmp16 * tmp16
    tmp25 = tmp17 * tmp17
    tmp26 = tmp24 + tmp25
    tmp27 = tmp19 * tmp19
    tmp28 = tmp26 + tmp27
    tmp29 = tmp21 * tmp21
    tmp30 = tmp28 + tmp29
    tmp31 = tmp23 - tmp30
    tmp32 = tmp15 + tmp31
    tmp35 = tmp33 + tmp34
    tmp37 = tmp35 + tmp36
    tmp39 = tmp37 + tmp38
    tmp40 = tmp39 * tmp39
    tmp41 = tmp33 * tmp33
    tmp42 = tmp34 * tmp34
    tmp43 = tmp41 + tmp42
    tmp44 = tmp36 * tmp36
    tmp45 = tmp43 + tmp44
    tmp46 = tmp38 * tmp38
    tmp47 = tmp45 + tmp46
    tmp48 = tmp40 - tmp47
    tmp49 = tmp32 + tmp48
    tmp52 = tmp50 + tmp51
    tmp54 = tmp52 + tmp53
    tmp56 = tmp54 + tmp55
    tmp57 = tmp56 * tmp56
    tmp58 = tmp50 * tmp50
    tmp59 = tmp51 * tmp51
    tmp60 = tmp58 + tmp59
    tmp61 = tmp53 * tmp53
    tmp62 = tmp60 + tmp61
    tmp63 = tmp55 * tmp55
    tmp64 = tmp62 + tmp63
    tmp65 = tmp57 - tmp64
    tmp66 = tmp49 + tmp65
    tmp67 = 0.5
    tmp68 = tmp66 * tmp67
    tl.store(in_out_ptr0 + x2, tmp68, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
    return buf1,


class FMNew(nn.Module):
    """Factorization Machine models pairwise (order-2) feature interactions
    without linear term and bias.

    Input shape
        - 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.

    Output shape
        - 2D tensor with shape: ``(batch_size, 1)``.

    References
        - [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
    """

    def __init__(self):
        super(FMNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
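A hedged cross-check of the compiled module against the eager FM above (this sketch assumes both classes are importable side by side and that a CUDA device is available, since call() allocates on cuda:0):

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager_out = FM()(x)        # shape (4, 1, 4) for this 4D test input
    compiled_out = FMNew()(x)  # same result via the fused Triton kernel
    torch.testing.assert_close(compiled_out, eager_out)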
liyunrui/DeepCTR-Torch
FM
false
12723
[ "Apache-2.0" ]
0
392fd6d39d9ca0ac854022136cdb4d5c68e3a592
https://github.com/liyunrui/DeepCTR-Torch/tree/392fd6d39d9ca0ac854022136cdb4d5c68e3a592