Dataset columns:

| Column | Type | Range |
| --- | --- | --- |
| entry_point | string | 1–65 chars |
| original_triton_code | string | 4.5k–619k chars |
| python_code | string | 208–60.9k chars |
| triton_code | string | 1.15k–275k chars |
| repo_name | string | 7–115 chars |
| module_name | string | 1–65 chars |
| synthetic | bool | 1 class |
| uuid | int64 | 0–18.5k |
| licenses | list | 1–6 items |
| stars | int64 | 0–19.8k |
| sha | string | 40 chars (fixed) |
| repo_link | string | 72–180 chars |
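Each record pairs a PyTorch module (`python_code`) with the Triton code that `torch.compile`'s inductor backend generated for it, both as the raw wrapper (`original_triton_code`) and as a cleaned standalone module (`triton_code`). A minimal loading sketch with the `datasets` library follows; the hub id is a placeholder, since the dataset's actual identifier is not given here:

```python
# Hypothetical loading example -- "<hub-id>" must be replaced with the
# dataset's real Hugging Face identifier, which this card does not state.
from datasets import load_dataset

ds = load_dataset("<hub-id>", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"])
print(row["python_code"][:200])  # the original PyTorch module source
```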
entry_point: LinearZeros
original_triton_code:

```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/dr/cdrlzpsdnaf5daayrtkicddem5cqnkmtu2cyqg5y26lqg3nvvxrr.py
# Topologically Sorted Source Nodes: [mul, exp, mul_1], Original ATen: [aten.mul, aten.exp]
# Source node to ATen node mapping:
#   exp => exp
#   mul => mul
#   mul_1 => mul_1
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 3), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %exp), kwargs = {})
triton_poi_fused_exp_mul_0 = async_compile.triton('triton_poi_fused_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = 3.0
    tmp3 = tmp1 * tmp2
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp0 * tmp4
    tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul, exp, mul_1], Original ATen: [aten.mul, aten.exp]
        stream0 = get_raw_stream(0)
        triton_poi_fused_exp_mul_0.run(buf0, primals_4, buf1, 256, grid=grid(256), stream=stream0)
    return (buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import torch
import torch.nn as nn


class LinearZeros(nn.Module):

    def __init__(self, in_channels, out_channels, logscale_factor=3):
        super().__init__()
        self.linear = nn.Linear(in_channels, out_channels)
        self.linear.weight.data.zero_()
        self.linear.bias.data.zero_()
        self.logscale_factor = logscale_factor
        self.logs = nn.Parameter(torch.zeros(out_channels))

    def forward(self, input):
        output = self.linear(input)
        return output * torch.exp(self.logs * self.logscale_factor)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
```
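Since the linear weight, bias, and `logs` are all zero-initialized, a fresh `LinearZeros` maps every input to zeros (`0 * exp(0 * 3) == 0`). A quick sanity check against the class above:

```python
import torch

m = LinearZeros(in_channels=4, out_channels=4)
x = torch.rand(4, 4, 4, 4)
# linear output is exactly zero, and exp(logs * 3) == 1, so the product is zero
assert torch.equal(m(x), torch.zeros_like(x))
```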
triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_exp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = 3.0
    tmp3 = tmp1 * tmp2
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp0 * tmp4
    tl.store(out_ptr0 + x2, tmp5, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
                             reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
                             alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_mul_0[grid(256)](buf0, primals_4, buf1, 256,
                                              XBLOCK=256, num_warps=4, num_stages=1)
    return buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0


class LinearZerosNew(nn.Module):

    def __init__(self, in_channels, out_channels, logscale_factor=3):
        super().__init__()
        self.linear = nn.Linear(in_channels, out_channels)
        self.linear.weight.data.zero_()
        self.linear.bias.data.zero_()
        self.logscale_factor = logscale_factor
        self.logs = nn.Parameter(torch.zeros(out_channels))

    def forward(self, input_0):
        primals_1 = self.linear.weight
        primals_2 = self.linear.bias  # added as the addmm bias inside call()
        primals_4 = self.logs         # scaled by 3 and exponentiated in the kernel
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
```
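A sketch of an equivalence check between the eager module and the compiled wrapper (assumes a CUDA device, and perturbs the parameters away from zero so the comparison is not vacuous):

```python
import torch

eager = LinearZeros(4, 4).cuda()
with torch.no_grad():
    for p in eager.parameters():
        p.uniform_(-0.5, 0.5)

fused = LinearZerosNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # same attribute names, so keys match

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), eager(x))
```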
repo_name: appuzanova/Glow-PyTorch
module_name: LinearZeros
synthetic: false
uuid: 12219
licenses: ["MIT"]
stars: 0
sha: 50316b1b242f0f345b2df9e3e4538cfab5a60895
repo_link: https://github.com/appuzanova/Glow-PyTorch/tree/50316b1b242f0f345b2df9e3e4538cfab5a60895
entry_point: Conv2dZeros
original_triton_code:

```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/zv/czvhqogqibwnm23q44xn6gzvpm2ac5f4wseb2e7zgfnmpvynwgoy.py
# Topologically Sorted Source Nodes: [output, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp]
# Source node to ATen node mapping:
#   exp => exp
#   mul => mul
#   mul_1 => mul_1
#   output => convolution
# Graph fragment:
#   %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 3), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %exp), kwargs = {})
triton_poi_fused_convolution_exp_mul_0 = async_compile.triton('triton_poi_fused_convolution_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_exp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = 3.0
    tmp5 = tmp3 * tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tmp2 * tmp6
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
    tl.store(out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0; del buf0  # reuse
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_exp_mul_0.run(buf1, primals_2, primals_4, buf2, 256, grid=grid(256), stream=stream0)
        del primals_2
    return (buf2, primals_1, primals_3, primals_4, buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import torch
import torch.nn as nn


def compute_same_pad(kernel_size, stride):
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size]
    if isinstance(stride, int):
        stride = [stride]
    assert len(stride) == len(kernel_size), 'Pass kernel size and stride both as int, or both as equal length iterable'
    return [(((k - 1) * s + 1) // 2) for k, s in zip(kernel_size, stride)]


class Conv2dZeros(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size=(3, 3),
                 stride=(1, 1), padding='same', logscale_factor=3):
        super().__init__()
        if padding == 'same':
            padding = compute_same_pad(kernel_size, stride)
        elif padding == 'valid':
            padding = 0
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
        self.conv.weight.data.zero_()
        self.conv.bias.data.zero_()
        self.logscale_factor = logscale_factor
        self.logs = nn.Parameter(torch.zeros(out_channels, 1, 1))

    def forward(self, input):
        output = self.conv(input)
        return output * torch.exp(self.logs * self.logscale_factor)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
```
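`compute_same_pad` returns `((k - 1) * s + 1) // 2` per axis, which for a stride-1, odd-sized kernel reduces to the familiar `k // 2` "same" padding:

```python
assert compute_same_pad(3, 1) == [1]              # ((3-1)*1 + 1) // 2 == 1
assert compute_same_pad((3, 3), (1, 1)) == [1, 1]
assert compute_same_pad(5, 1) == [2]              # 5 // 2 == 2
```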
triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = 3.0
    tmp5 = tmp3 * tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tmp2 * tmp6
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(out_ptr0 + x3, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1),
                                          padding=(1, 1), dilation=(1, 1), transposed=False,
                                          output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_exp_mul_0[grid(256)](buf1, primals_2,
                                                          primals_4, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf2, primals_1, primals_3, primals_4, buf1


def compute_same_pad(kernel_size, stride):
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size]
    if isinstance(stride, int):
        stride = [stride]
    assert len(stride) == len(kernel_size), 'Pass kernel size and stride both as int, or both as equal length iterable'
    return [(((k - 1) * s + 1) // 2) for k, s in zip(kernel_size, stride)]


class Conv2dZerosNew(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size=(3, 3),
                 stride=(1, 1), padding='same', logscale_factor=3):
        super().__init__()
        if padding == 'same':
            padding = compute_same_pad(kernel_size, stride)
        elif padding == 'valid':
            padding = 0
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
        self.conv.weight.data.zero_()
        self.conv.bias.data.zero_()
        self.logscale_factor = logscale_factor
        self.logs = nn.Parameter(torch.zeros(out_channels, 1, 1))

    def forward(self, input_0):
        primals_4 = self.logs
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
```
repo_name: appuzanova/Glow-PyTorch
module_name: Conv2dZeros
synthetic: false
uuid: 12220
licenses: ["MIT"]
stars: 0
sha: 50316b1b242f0f345b2df9e3e4538cfab5a60895
repo_link: https://github.com/appuzanova/Glow-PyTorch/tree/50316b1b242f0f345b2df9e3e4538cfab5a60895
entry_point: MaxSpatialPoolP4
original_triton_code:

```python
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/42/c427kvrjmqncmw2xyqovuy3r4njc53tuzaapdqy5btnunaf64ze7.py
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
#   y_1 => _low_memory_max_pool2d_with_offsets
# Graph fragment:
#   %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%view, [4, 4], [4, 4], [0, 0], [1, 1], False), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp20 = triton_helpers.maximum(tmp19, tmp18)
    tmp22 = triton_helpers.maximum(tmp21, tmp20)
    tmp24 = triton_helpers.maximum(tmp23, tmp22)
    tmp26 = triton_helpers.maximum(tmp25, tmp24)
    tmp28 = triton_helpers.maximum(tmp27, tmp26)
    tmp30 = triton_helpers.maximum(tmp29, tmp28)
    tl.store(out_ptr0 + (x0), tmp30, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.max_pool2d_with_indices]
        stream0 = get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
        del arg0_1
    return (reinterpret_tensor(buf0, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import torch


class MaxSpatialPoolP4(torch.nn.Module):

    def __init__(self, kernel_size, stride=None, padding=0):
        super().__init__()
        self.inner = torch.nn.MaxPool2d(kernel_size, stride, padding)

    def forward(self, x):
        y = x.view(x.size(0), -1, x.size(3), x.size(4))
        y = self.inner(y)
        y = y.view(x.size(0), -1, 4, y.size(2), y.size(3))
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'kernel_size': 4}]
```
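A shape check of the wrapper's fold/unfold trick: the group axis (size 4) is merged into the channel axis for the 2D max-pool and split back out afterwards, so only the last two dimensions are pooled:

```python
import torch

pool = MaxSpatialPoolP4(kernel_size=4)
x = torch.rand(4, 4, 4, 4, 4)      # (batch, channels, group=4, H, W)
y = pool(x)
assert y.shape == (4, 4, 4, 1, 1)  # H and W pooled 4 -> 1; group axis intact
```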
triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp20 = triton_helpers.maximum(tmp19, tmp18)
    tmp22 = triton_helpers.maximum(tmp21, tmp20)
    tmp24 = triton_helpers.maximum(tmp23, tmp22)
    tmp26 = triton_helpers.maximum(tmp25, tmp24)
    tmp28 = triton_helpers.maximum(tmp27, tmp26)
    tmp30 = triton_helpers.maximum(tmp29, tmp28)
    tl.store(out_ptr0 + x0, tmp30, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0),


class MaxSpatialPoolP4New(torch.nn.Module):

    def __init__(self, kernel_size, stride=None, padding=0):
        super().__init__()
        self.inner = torch.nn.MaxPool2d(kernel_size, stride, padding)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```
repo_name: claudio-unipv/groupcnn
module_name: MaxSpatialPoolP4
synthetic: false
uuid: 12222
licenses: ["MIT"]
stars: 0
sha: 2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
repo_link: https://github.com/claudio-unipv/groupcnn/tree/2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
entry_point: ModulatedConv2d
original_triton_code:

```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   mul => mul
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_9/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   mul_1 => mul_1
# Graph fragment:
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_9/inductor_cache/ri/criuvsdl3sferb4bb6ci5zaps3wys7xxcpybz7vfo2ba4q7cuq6c.py
# Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt]
# Source node to ATen node mapping:
#   add => add
#   demod => rsqrt
#   mul_2 => mul_2
#   pow_1 => pow_1
#   sum_1 => sum_1
#   weight => mul_3
#   weight_1 => mul_4
# Graph fragment:
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.125), kwargs = {})
#   %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_3, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4]), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {})
#   %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %view_1), kwargs = {})
triton_per_fused_add_mul_pow_rsqrt_sum_2 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[16, 64],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    rnumel = 64
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r5 = rindex
    x0 = xindex % 4
    r3 = (rindex // 16)
    x1 = (xindex // 4)
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (r5 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0)
    tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), xmask, eviction_policy='evict_last', other=0.0)
    tmp1 = 0.125
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 * tmp3
    tmp5 = tmp4 * tmp4
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp10 = 1e-08
    tmp11 = tmp9 + tmp10
    tmp12 = libdevice.rsqrt(tmp11)
    tmp13 = tmp4 * tmp12
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x4), tmp12, xmask)
    tl.store(out_ptr0 + (r5 + (64*x4)), tmp13, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, ), (1, ))
    assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_0.run(primals_3, buf0, 16, grid=grid(16), stream=stream0)
        del primals_3
        buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
        # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(primals_4, buf1, 4, grid=grid(4), stream=stream0)
        del primals_4
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm]
        extern_kernels.addmm(buf1, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del buf1
        buf3 = buf0; del buf0  # reuse
        buf4 = buf3; del buf3  # reuse
        buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt]
        triton_per_fused_add_mul_pow_rsqrt_sum_2.run(buf4, primals_5, buf2, buf5, 16, 64, grid=grid(16), stream=stream0)
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
        buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1))
    return (reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0), primals_2, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import math
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F


def make_kernel(k):
    k = torch.tensor(k, dtype=torch.float32)
    if len(k.shape) == 1:
        k = k[None, :] * k[:, None]
    k /= k.sum()
    return k


def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1):
    _, minor, in_h, in_w = input.shape
    kernel_h, kernel_w = kernel.shape
    out = input.view(-1, minor, in_h, 1, in_w, 1)
    out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0])
    out = out.view(-1, minor, in_h * up_y, in_w * up_x)
    out = F.pad(out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])
    out = out[:, :, max(-pad_y0, 0):out.shape[2] - max(-pad_y1, 0),
              max(-pad_x0, 0):out.shape[3] - max(-pad_x1, 0)]
    out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    out = F.conv2d(out, w)
    out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
                      in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
    return out[:, :, ::down_y, ::down_x]


def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])


def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    return F.leaky_relu(input + bias, negative_slope) * scale


class Blur(nn.Module):

    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()
        kernel = make_kernel(kernel)
        if upsample_factor > 1:
            kernel = kernel * upsample_factor ** 2
        self.register_buffer('kernel', kernel)
        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)
        return out


class EqualLinear(nn.Module):

    def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None
        self.activation = activation
        self.scale = math.sqrt(1) / math.sqrt(in_dim) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)
        else:
            out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul)
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
        )


class ModulatedConv2d(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, style_dim,
                 demodulate=True, upsample=False, downsample=False,
                 blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        self.eps = 1e-08
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample
        if upsample:
            factor = 2
            p = len(blur_kernel) - factor - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
        if downsample:
            factor = 2
            p = len(blur_kernel) - factor + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
        fan_in = in_channel * kernel_size ** 2
        self.scale = math.sqrt(1) / math.sqrt(fan_in)
        self.padding = kernel_size // 2
        self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size))
        if style_dim is not None and style_dim > 0:
            self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
        self.demodulate = demodulate

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
        )

    def forward(self, input, style):
        batch, in_channel, height, width = input.shape
        if style is not None:
            style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        else:
            style = torch.ones(batch, 1, in_channel, 1, 1)
        weight = self.scale * self.weight * style
        if self.demodulate:
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
        weight = weight.view(batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size)
        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            weight = weight.view(batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size)
            weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size)
            out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)
        elif self.downsample:
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        else:
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
```
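The demodulation that the fused `triton_per_fused_add_mul_pow_rsqrt_sum_2` kernel implements, written out in plain tensor ops as a sketch: modulate the fan-in-scaled weight per input channel, then rescale each output filter to roughly unit L2 norm:

```python
import torch

batch, out_ch, in_ch, k = 2, 4, 4, 3
w = torch.randn(1, out_ch, in_ch, k, k)         # shared base weight
style = torch.randn(batch, 1, in_ch, 1, 1)      # per-sample channel gains
scale = (in_ch * k * k) ** -0.5                 # fan-in scaling

weight = scale * w * style                      # (batch, out, in, k, k)
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
weight = weight * demod.view(batch, out_ch, 1, 1, 1)

# each output filter now has (near-)unit norm, up to the 1e-8 epsilon
norms = weight.pow(2).sum([2, 3, 4])
assert torch.allclose(norms, torch.ones(batch, out_ch), atol=1e-4)
```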
triton_code:

```python
import math
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r5 = rindex
    x0 = xindex % 4
    r3 = rindex // 16
    x1 = xindex // 4
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy='evict_last', other=0.0)
    tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp1 = 0.125
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 * tmp3
    tmp5 = tmp4 * tmp4
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp10 = 1e-08
    tmp11 = tmp9 + tmp10
    tmp12 = libdevice.rsqrt(tmp11)
    tmp13 = tmp4 * tmp12
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x4, tmp12, xmask)
    tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_mul_1[grid(4)](primals_4, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1)
        del primals_4
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(buf1, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del buf1
        buf3 = buf0
        del buf0
        buf4 = buf3
        del buf3
        buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0),
                                          reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0),
                                          stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False,
                                          output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1))
    return (reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0),
            primals_2, primals_5, buf2, buf4,
            reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0),
            reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0))


def make_kernel(k):
    k = torch.tensor(k, dtype=torch.float32)
    if len(k.shape) == 1:
        k = k[None, :] * k[:, None]
    k /= k.sum()
    return k


def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1):
    _, minor, in_h, in_w = input.shape
    kernel_h, kernel_w = kernel.shape
    out = input.view(-1, minor, in_h, 1, in_w, 1)
    out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0])
    out = out.view(-1, minor, in_h * up_y, in_w * up_x)
    out = F.pad(out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])
    out = out[:, :, max(-pad_y0, 0):out.shape[2] - max(-pad_y1, 0),
              max(-pad_x0, 0):out.shape[3] - max(-pad_x1, 0)]
    out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    out = F.conv2d(out, w)
    out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
                      in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
    return out[:, :, ::down_y, ::down_x]


def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])


def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    return F.leaky_relu(input + bias, negative_slope) * scale


class Blur(nn.Module):

    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()
        kernel = make_kernel(kernel)
        if upsample_factor > 1:
            kernel = kernel * upsample_factor ** 2
        self.register_buffer('kernel', kernel)
        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)
        return out


class EqualLinear(nn.Module):

    def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None
        self.activation = activation
        self.scale = math.sqrt(1) / math.sqrt(in_dim) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)
        else:
            out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul)
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
        )


class ModulatedConv2dNew(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, style_dim,
                 demodulate=True, upsample=False, downsample=False,
                 blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        self.eps = 1e-08
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample
        if upsample:
            factor = 2
            p = len(blur_kernel) - factor - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
        if downsample:
            factor = 2
            p = len(blur_kernel) - factor + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
        fan_in = in_channel * kernel_size ** 2
        self.scale = math.sqrt(1) / math.sqrt(fan_in)
        self.padding = kernel_size // 2
        self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size))
        if style_dim is not None and style_dim > 0:
            self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
        self.demodulate = demodulate

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
        )

    def forward(self, input_0, input_1):
        primals_5 = self.weight
        # per the recorded graph fragment, primals_3 is the tensor multiplied
        # by 0.5 (EqualLinear's scale), i.e. the modulation weight, and
        # primals_2 is the addmm input, i.e. the style vector
        primals_3 = self.modulation.weight
        primals_4 = self.modulation.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
```
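As with the linear case, a hedged CUDA parity check between the eager and compiled modules (parameters are already random at construction, so copying the state dict is enough):

```python
import torch

torch.manual_seed(0)
eager = ModulatedConv2d(4, 4, 4, 4).cuda()
fused = ModulatedConv2dNew(4, 4, 4, 4).cuda()
fused.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
style = torch.rand(4, 4, device='cuda')
torch.testing.assert_close(fused(x, style), eager(x, style))
```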
repo_name: bomtorazek/contrastive-unpaired-translation
module_name: ModulatedConv2d
synthetic: false
uuid: 12223
licenses: ["BSD-3-Clause"]
stars: 0
sha: 07c048038375e1b9a4e464154b8dbc49f5e16ede
repo_link: https://github.com/bomtorazek/contrastive-unpaired-translation/tree/07c048038375e1b9a4e464154b8dbc49f5e16ede
entry_point: Pooler
original_triton_code (this record is cut off mid-statement in the source):

```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/oy/coyyh4komknigd6s5bag6tix6g4xmd2zqzeqo5stuxj2oszsrdim.py
# Topologically Sorted Source Nodes: [pad_mask], Original ATen: [aten.eq]
# Source node to ATen node mapping:
#   pad_mask => eq
# Graph fragment:
#   %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%unsqueeze, 0), kwargs = {})
triton_poi_fused_eq_0 = async_compile.triton('triton_poi_fused_eq_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_9/inductor_cache/3w/c3wlapzaxh7t3dkquomscia7dkqslttzsx2utb3vkxga32r5irr5.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
#   max_1 => getitem
# Graph fragment:
#   %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {})
triton_poi_fused_max_1 = async_compile.triton('triton_poi_fused_max_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2048],
    filename=__file__,
    triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 512)
    x0 = xindex % 512
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x1), None, eviction_policy='evict_last').to(tl.int1)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), None, eviction_policy='evict_last').to(tl.int1)
    tmp5 = tl.load(in_ptr1 + (512 + x0), None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), None, eviction_policy='evict_last').to(tl.int1)
    tmp9 = tl.load(in_ptr1 + (1024 + x0), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (3 + (4*x1)), None, eviction_policy='evict_last').to(tl.int1)
    tmp13 = tl.load(in_ptr1 + (1536 + x0), None, eviction_policy='evict_last')
    tmp2 = float("-inf")
    tmp3 = tl.where(tmp0, tmp2, tmp1)
    tmp6 = tl.where(tmp4, tmp2, tmp5)
    tmp7 = triton_helpers.maximum(tmp3, tmp6)
    tmp10 = tl.where(tmp8, tmp2, tmp9)
    tmp11 = triton_helpers.maximum(tmp7, tmp10)
    tmp14 = tl.where(tmp12, tmp2, tmp13)
    tmp15 = triton_helpers.maximum(tmp11, tmp14)
    tl.store(out_ptr0 + (x2), tmp15, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (512, 4), (4, 1))
    assert_size_stride(primals_3, (512, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
        # Topologically Sorted Source Nodes: [pad_mask], Original ATen: [aten.eq]
        stream0 = get_raw_stream(0)
        triton_poi_fused_eq_0.run(primals_1, buf0, 16, grid=grid(16),
```
stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [proj_seq], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(primals_2, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] triton_poi_fused_max_1.run(buf0, buf1, buf2, 2048, grid=grid(2048), stream=stream0) return (buf2, primals_4, buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((512, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed class Pooler(nn.Module): """ Do pooling, possibly with a projection beforehand """ def __init__(self, d_inp, project=True, d_proj=512, pool_type='max'): super(Pooler, self).__init__() self.project = nn.Linear(d_inp, d_proj) if project else lambda x: x self.pool_type = pool_type def forward(self, sequence, mask): if len(mask.size()) < 3: mask = mask.unsqueeze(dim=-1) pad_mask = mask == 0 proj_seq = self.project(sequence) if self.pool_type == 'max': proj_seq = proj_seq.masked_fill(pad_mask, -float('inf')) seq_emb = proj_seq.max(dim=1)[0] elif self.pool_type == 'mean': proj_seq = proj_seq.masked_fill(pad_mask, 0) seq_emb = proj_seq.sum(dim=1) / mask.sum(dim=1) elif self.pool_type == 'final': idxs = mask.expand_as(proj_seq).sum(dim=1, keepdim=True).long() - 1 seq_emb = proj_seq.gather(dim=1, index=idxs) return seq_emb @classmethod def from_params(cls, d_inp, d_proj, project=True): return cls(d_inp, d_proj=d_proj, project=project) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_inp': 4}]
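A minimal usage sketch for the Pooler class above; the batch size, sequence length, and 0/1 mask values are illustrative assumptions, not taken from this row:

import torch

pooler = Pooler(d_inp=4, d_proj=512, pool_type='max')
sequence = torch.rand(2, 5, 4)               # (batch, seq_len, d_inp)
mask = torch.tensor([[1., 1., 1., 0., 0.],
                     [1., 1., 1., 1., 1.]])  # 0 marks padded positions
emb = pooler(sequence, mask)                 # padded steps are filled with -inf before the max
print(emb.shape)                             # torch.Size([2, 512])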
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_max_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 512 x0 = xindex % 512 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, None, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), None, eviction_policy='evict_last' ).to(tl.int1) tmp5 = tl.load(in_ptr1 + (512 + x0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), None, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr1 + (1024 + x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x1), None, eviction_policy='evict_last' ).to(tl.int1) tmp13 = tl.load(in_ptr1 + (1536 + x0), None, eviction_policy='evict_last') tmp2 = float('-inf') tmp3 = tl.where(tmp0, tmp2, tmp1) tmp6 = tl.where(tmp4, tmp2, tmp5) tmp7 = triton_helpers.maximum(tmp3, tmp6) tmp10 = tl.where(tmp8, tmp2, tmp9) tmp11 = triton_helpers.maximum(tmp7, tmp10) tmp14 = tl.where(tmp12, tmp2, tmp13) tmp15 = triton_helpers.maximum(tmp11, tmp14) tl.store(out_ptr0 + x2, tmp15, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (512, 4), (4, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor( primals_2, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32) triton_poi_fused_max_1[grid(2048)](buf0, buf1, buf2, 2048, XBLOCK= 128, num_warps=4, num_stages=1) return buf2, primals_4, buf0, buf1 class PoolerNew(nn.Module): """ Do pooling, possibly with a projection beforehand """ def __init__(self, d_inp, project=True, d_proj=512, pool_type='max'): super(PoolerNew, self).__init__() self.project = nn.Linear(d_inp, d_proj) if project else lambda x: x self.pool_type = pool_type @classmethod def from_params(cls, d_inp, d_proj, project=True): return cls(d_inp, d_proj=d_proj, project=project) def forward(self, input_0, input_1): primals_2 = self.project.weight primals_3 = self.project.bias primals_1 = input_0 primals_4 = 
input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
repo_name: cjmay/jiant
module_name: Pooler
synthetic: false
uuid: 12224
licenses: ["MIT"]
stars: 0
sha: 46e6fa9d0fc73883468646cbd0f36f4166720911
repo_link: https://github.com/cjmay/jiant/tree/46e6fa9d0fc73883468646cbd0f36f4166720911
entry_point: ConvZ2P4
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/5h/c5hghr3snia5qqmend6rsbomhhf6dms5lhj6jqdqjlzyuzujwtvh.py # Topologically Sorted Source Nodes: [cat, w], Original ATen: [aten.cat, aten.view] # Source node to ATen node mapping: # cat => cat # w => view # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %permute, %rev_1, %permute_1], 1), kwargs = {}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%cat, [-1, 4, 4, 4]), kwargs = {}) triton_poi_fused_cat_view_0 = async_compile.triton('triton_poi_fused_cat_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) % 16 x3 = (xindex // 256) x4 = 
xindex % 16 x0 = xindex % 4 x1 = (xindex // 4) % 4 x6 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + (16*x2) + (64*x3)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (3 + ((-1)*x1) + (4*x0) + (16*((-4) + x2)) + (64*x3)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (15 + ((-1)*x4) + (16*((-8) + x2)) + (64*x3)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (12 + x1 + ((-4)*x0) + (16*((-12) + x2)) + (64*x3)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(in_out_ptr0 + (x6), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zi/cziw7t7r4rwnrbkr2wbyynko63el7sopwo5iqpsuxrrcdm7c3ovk.py # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.add] # Source node to ATen node mapping: # y_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_2), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 36) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (16, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [cat, w], Original ATen: [aten.cat, aten.view] stream0 = get_raw_stream(0) triton_poi_fused_cat_view_0.run(buf1, primals_1, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_2, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 3, 3), (144, 9, 3, 1)) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 3, 3), (144, 36, 9, 3, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf3, primals_3, 576, grid=grid(576), stream=stream0) del primals_3 return (buf3, primals_2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class ConvZ2P4(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, stride=1, padding=1): super().__init__() w = torch.empty(out_channels, in_channels, kernel_size, kernel_size) self.weight = torch.nn.Parameter(w) torch.nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) else: self.bias = None self.stride = stride self.padding = padding def _rotated(self, w): ws = [torch.rot90(w, k, (2, 3)) for k in range(4)] return torch.cat(ws, 1).view(-1, w.size(1), w.size(2), w.size(3)) def forward(self, x): w = self._rotated(self.weight) y = torch.nn.functional.conv2d(x, w, stride=self.stride, padding= self.padding) y = y.view(y.size(0), -1, 4, y.size(2), y.size(3)) if self.bias is not None: y = y + self.bias.view(1, -1, 1, 1, 1) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
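A minimal usage sketch for ConvZ2P4 above: the layer convolves the input with all four 90-degree rotations of the same filter bank, so every output channel gains an extra rotation axis of size 4. The shapes here are illustrative assumptions:

import torch

conv = ConvZ2P4(in_channels=3, out_channels=8, kernel_size=3)
x = torch.rand(1, 3, 16, 16)
y = conv(x)      # kernel 3 with padding 1 preserves H and W
print(y.shape)   # torch.Size([1, 8, 4, 16, 16]) = (N, C_out, rotation, H, W)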
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 16 x3 = xindex // 256 x4 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x6 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (3 + -1 * x1 + 4 * x0 + 16 * (-4 + x2) + 64 * x3), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (15 + -1 * x4 + 16 * (-8 + x2) + 64 * x3), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (12 + x1 + -4 * x0 + 16 * (-12 + x2) + 64 * x3), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(in_out_ptr0 + x6, tmp22, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 36 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32 ) buf1 = reinterpret_tensor(buf0, (16, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_cat_view_0[grid(1024)](buf1, primals_1, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(primals_2, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 3, 3), (144, 9, 3, 1)) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 3, 3), (144, 36, 9, 3, 1), 0) del buf2 triton_poi_fused_add_1[grid(576)](buf3, primals_3, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf3, primals_2, buf1 class ConvZ2P4New(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, stride=1, padding=1): super().__init__() w = torch.empty(out_channels, in_channels, kernel_size, kernel_size) self.weight = torch.nn.Parameter(w) torch.nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) else: self.bias = None self.stride = stride self.padding = 
padding def _rotated(self, w): ws = [torch.rot90(w, k, (2, 3)) for k in range(4)] return torch.cat(ws, 1).view(-1, w.size(1), w.size(2), w.size(3)) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
repo_name: claudio-unipv/groupcnn
module_name: ConvZ2P4
synthetic: false
uuid: 12225
licenses: ["MIT"]
stars: 0
sha: 2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
repo_link: https://github.com/claudio-unipv/groupcnn/tree/2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
entry_point: Envelope
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/a5/ca552euenr5zronvokyf6hu3n26y6t2owlk46lpzmvr73d2zc43w.py # Topologically Sorted Source Nodes: [lt, truediv, x_pow_p0, mul_1, add, x_pow_p1, mul_2, add_1, mul_3, mul_4, env_val, zero, where], Original ATen: [aten.lt, aten.reciprocal, aten.mul, aten.pow, aten.add, aten.zeros_like, aten.where] # Source node to ATen node mapping: # add => add # add_1 => add_1 # env_val => add_2 # lt => lt # mul_1 => mul_2 # mul_2 => mul_3 # mul_3 => mul_4 # mul_4 => mul_5 # truediv => mul_1, reciprocal # where => where # x_pow_p0 => pow_1 # x_pow_p1 => mul # zero => full_default # Graph fragment: # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%arg0_1, 1), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%arg0_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -15.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %arg0_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 24), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, -10.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %arg0_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_5), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %add_2, 
%full_default), kwargs = {}) triton_poi_fused_add_lt_mul_pow_reciprocal_where_zeros_like_0 = async_compile.triton('triton_poi_fused_add_lt_mul_pow_reciprocal_where_zeros_like_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_lt_mul_pow_reciprocal_where_zeros_like_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_lt_mul_pow_reciprocal_where_zeros_like_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int32) tmp4 = tmp3 / tmp0 tmp5 = tmp4 * tmp1 tmp6 = tmp0 * tmp0 tmp7 = tmp6 * tmp6 tmp8 = -15.0 tmp9 = tmp7 * tmp8 tmp10 = tmp5 + tmp9 tmp11 = tmp7 * tmp0 tmp12 = 24.0 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = -10.0 tmp16 = tmp11 * tmp15 tmp17 = tmp16 * tmp0 tmp18 = tmp14 + tmp17 tmp19 = 0.0 tmp20 = tl.where(tmp2, tmp18, tmp19) tl.store(out_ptr0 + (x0), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [lt, truediv, x_pow_p0, mul_1, add, x_pow_p1, mul_2, add_1, mul_3, mul_4, env_val, zero, where], Original ATen: [aten.lt, aten.reciprocal, aten.mul, aten.pow, aten.add, aten.zeros_like, aten.where] stream0 = get_raw_stream(0) triton_poi_fused_add_lt_mul_pow_reciprocal_where_zeros_like_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class Envelope(torch.nn.Module): def __init__(self, exponent): super(Envelope, self).__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, x): p, a, b, c = self.p, self.a, self.b, self.c x_pow_p0 = x.pow(p) x_pow_p1 = x_pow_p0 * x env_val = 1.0 / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p1 * x zero = torch.zeros_like(x) return torch.where(x < 1, env_val, zero) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'exponent': 4}]
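For the traced exponent p = 4, the envelope's coefficients reduce to a = -(p+1)(p+2)/2 = -15, b = p(p+2) = 24, and c = -p(p+1)/2 = -10, which is exactly where the literals -15.0, 24.0, and -10.0 hard-coded in the fused kernel above come from. A quick sketch confirming this:

import torch

env = Envelope(exponent=4)
print(env.a, env.b, env.c)   # -15.0 24 -10.0
x = torch.rand(4, 4, 4, 4)
print(env(x).shape)          # torch.Size([4, 4, 4, 4]); zero wherever x >= 1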
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_lt_mul_pow_reciprocal_where_zeros_like_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int32) tmp4 = tmp3 / tmp0 tmp5 = tmp4 * tmp1 tmp6 = tmp0 * tmp0 tmp7 = tmp6 * tmp6 tmp8 = -15.0 tmp9 = tmp7 * tmp8 tmp10 = tmp5 + tmp9 tmp11 = tmp7 * tmp0 tmp12 = 24.0 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = -10.0 tmp16 = tmp11 * tmp15 tmp17 = tmp16 * tmp0 tmp18 = tmp14 + tmp17 tmp19 = 0.0 tmp20 = tl.where(tmp2, tmp18, tmp19) tl.store(out_ptr0 + x0, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_lt_mul_pow_reciprocal_where_zeros_like_0[grid(256) ](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class EnvelopeNew(torch.nn.Module): def __init__(self, exponent): super(EnvelopeNew, self).__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
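A parity sketch between the eager Envelope and the compiled EnvelopeNew, both defined above; it assumes a CUDA device, since call() allocates its output on cuda:0:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
# The fused kernel uses repeated multiplication where the eager module
# uses pow, so allow a small absolute tolerance.
assert torch.allclose(Envelope(exponent=4)(x), EnvelopeNew(exponent=4)(x), atol=1e-6)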
repo_name: coopersigrist/Multi-fragment-energy
module_name: Envelope
synthetic: false
uuid: 12226
licenses: ["MIT"]
stars: 0
sha: c21c1b884f364cf3f2ac71e393464e85ebeccb04
repo_link: https://github.com/coopersigrist/Multi-fragment-energy/tree/c21c1b884f364cf3f2ac71e393464e85ebeccb04
entry_point: SoftEntropy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax_1, exp_1, sub_2 # Graph fragment: # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bg/cbg32drchyezvbfwshguvyopixmzwi2llws7xkhvpdruis76tr2t.py # Topologically Sorted Source Nodes: [log_probs], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_probs => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/qb/cqbakowgciuzs25w7ws4yr2pp5cb457kjb4ikooghfei7p5xsin5.py # Topologically Sorted Source Nodes: [softmax, neg, log_probs, mul], Original ATen: [aten._softmax, aten.neg, aten._log_softmax, aten.mul] # Source node to ATen node mapping: # log_probs => exp, log, sub_1, sum_1 # mul => mul # neg => neg # softmax => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%div,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %sub_1), kwargs = {}) triton_poi_fused__log_softmax__softmax_mul_neg_2 = async_compile.triton('triton_poi_fused__log_softmax__softmax_mul_neg_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_mul_neg_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax__softmax_mul_neg_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (x3), xmask) tmp11 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, 
eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = -tmp8 tmp12 = tl_math.exp(tmp11) tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp17 = tl_math.exp(tmp16) tmp18 = tmp15 + tmp17 tmp20 = tl_math.exp(tmp19) tmp21 = tmp18 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp10 - tmp22 tmp24 = tmp9 * tmp23 tl.store(out_ptr0 + (x3), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/sv/csvwppyeutu2vnic34436e4qzo4dezwyeghwukd426sewew73kif.py # Topologically Sorted Source Nodes: [mean, loss], Original ATen: [aten.mean, aten.sum] # Source node to ATen node mapping: # loss => sum_3 # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [0]), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mean,), kwargs = {}) triton_per_fused_mean_sum_3 = async_compile.triton('triton_per_fused_mean_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_sum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_probs], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax, neg, log_probs, mul], Original ATen: [aten._softmax, aten.neg, aten._log_softmax, aten.mul] triton_poi_fused__log_softmax__softmax_mul_neg_2.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 buf3 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [mean, loss], Original ATen: [aten.mean, aten.sum] triton_per_fused_mean_sum_3.run(buf2, buf3, 1, 64, grid=grid(1), stream=stream0) del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F from torch.nn import * from torch.optim.lr_scheduler import * class SoftEntropy(nn.Module): def __init__(self): super(SoftEntropy, self).__init__() self.logsoftmax = nn.LogSoftmax(dim=1) def forward(self, inputs, targets): log_probs = self.logsoftmax(inputs) loss = (-F.softmax(targets, dim=1).detach() * log_probs).mean(0).sum() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
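A minimal usage sketch for SoftEntropy above, read as a soft cross-entropy between student logits and detached teacher logits; the shapes and the backward call are illustrative assumptions:

import torch

criterion = SoftEntropy()
student_logits = torch.randn(8, 10, requires_grad=True)  # (batch, classes)
teacher_logits = torch.randn(8, 10)                      # .detach() in forward blocks its gradient
loss = criterion(student_logits, teacher_logits)
loss.backward()
print(loss.item(), student_logits.grad.shape)            # scalar loss, grad of shape (8, 10)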
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax__softmax_mul_neg_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr1 + x3, xmask) tmp11 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = -tmp8 tmp12 = tl_math.exp(tmp11) tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp17 = tl_math.exp(tmp16) tmp18 = tmp15 + tmp17 tmp20 = tl_math.exp(tmp19) tmp21 = tmp18 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp10 - tmp22 tmp24 = tmp9 * tmp23 tl.store(out_ptr0 + x3, tmp24, xmask) 
@triton.jit def triton_per_fused_mean_sum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(256)](buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 buf3 = empty_strided_cuda((), (), torch.float32) triton_per_fused_mean_sum_3[grid(1)](buf2, buf3, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf2 return buf3, class SoftEntropyNew(nn.Module): def __init__(self): super(SoftEntropyNew, self).__init__() self.logsoftmax = nn.LogSoftmax(dim=1) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: chrizandr/MMT
module_name: SoftEntropy
synthetic: false
uuid: 12227
licenses: ["MIT"]
stars: 0
sha: e2bb5984efb165e7ea1ed6080610cfe176344ac0
repo_link: https://github.com/chrizandr/MMT/tree/e2bb5984efb165e7ea1ed6080610cfe176344ac0
entry_point: FourierFeatures
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/4n/c4nzpnmnxnddegpx57hh6jact2tympwu7veza5bsp5ut7t35yaq2.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 6.283185307179586), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 6.283185307179586 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/v4/cv4pft4jyhaxps3kfatrvu4flmf2zjqqsfush3cniwijnyyxmmmn.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cos, %sin], -1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl_math.cos(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.load(in_ptr0 + ((2*x1) + ((-2) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl_math.sin(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [f], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1) del primals_2 buf2 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) return (buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class FourierFeatures(nn.Module):

    def __init__(self, in_features, out_features, std=1.0):
        super().__init__()
        assert out_features % 2 == 0
        self.weight = nn.Parameter(torch.randn([out_features // 2,
            in_features]) * std)

    def forward(self, input):
        f = 2 * math.pi * input @ self.weight.T
        return torch.cat([f.cos(), f.sin()], dim=-1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
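A minimal usage sketch for the reference module above, assuming only stock PyTorch on CPU (shapes follow get_inputs/get_init_inputs; the assertions just restate what forward computes):

import math
import torch

torch.manual_seed(0)
ff = FourierFeatures(in_features=4, out_features=4)
x = torch.rand([4, 4, 4, 4])
out = ff(x)
# out_features // 2 cosine channels followed by out_features // 2 sine channels
assert out.shape == (4, 4, 4, 4)
f = 2 * math.pi * x @ ff.weight.T
assert torch.allclose(out, torch.cat([f.cos(), f.sin()], dim=-1))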
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 6.283185307179586 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl_math.cos(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp12 = tl.load(in_ptr0 + (2 * x1 + (-2 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl_math.sin(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(256)](buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1 class FourierFeaturesNew(nn.Module): def __init__(self, in_features, out_features, std=1.0): super().__init__() assert out_features % 2 == 0 self.weight = nn.Parameter(torch.randn([out_features // 2, in_features]) * std) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
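A sketch of how the eager and compiled variants could be compared, assuming both classes above are in scope and a CUDA device is available (call() asserts CUDA strides, so this cannot run on CPU). The weight copy and the tolerance are choices made here, not part of the source:

import torch

torch.manual_seed(0)
eager = FourierFeatures(4, 4).cuda()
compiled = FourierFeaturesNew(4, 4).cuda()
# FourierFeaturesNew draws its own random weight, so copy it for a fair comparison
with torch.no_grad():
    compiled.weight.copy_(eager.weight)
x = torch.rand([4, 4, 4, 4], device='cuda')
# cuBLAS mm plus tl_math cos/sin should agree with eager aten ops to ~1e-5
assert torch.allclose(eager(x), compiled(x), atol=1e-5)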
corajr/diffusion_gen
FourierFeatures
false
12,228
[ "MIT" ]
0
724377c8e244120cbd1caa75d474e3e14ded9bfa
https://github.com/corajr/diffusion_gen/tree/724377c8e244120cbd1caa75d474e3e14ded9bfa
MaxRotationPoolP4
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/vy/cvytfio772nhve7g7rhew5t4eiv7s2whjjlerc47yi2xbz6yb2ft.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {}) triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp2 = 
triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class MaxRotationPoolP4(torch.nn.Module):

    def forward(self, x):
        return x.max(2).values


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
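For reference, a short sketch of the reduction this module performs, assuming stock PyTorch: max over dim 2 (the rotation axis) drops that axis, and is interchangeable with torch.amax:

import torch

pool = MaxRotationPoolP4()
x = torch.rand([4, 4, 4, 4])
y = pool(x)
assert y.shape == (4, 4, 4)
# x.max(2).values returns the same tensor as amax over the rotation dimension
assert torch.equal(y, torch.amax(x, dim=2))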
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxRotationPoolP4New(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
claudio-unipv/groupcnn
MaxRotationPoolP4
false
12,229
[ "MIT" ]
0
2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
https://github.com/claudio-unipv/groupcnn/tree/2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
LinearFeedforward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf3, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data


class Linear(nn.Linear):

    def forward(self, x):
        size = x.size()
        return super().forward(x.contiguous().view(-1, size[-1])).view(
            *size[:-1], -1)


class Feedforward(nn.Module):

    def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):
        super().__init__()
        if activation is not None:
            self.activation = getattr(F, activation)
        else:
            self.activation = lambda x: x
        self.linear = Linear(d_in, d_out, bias=bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.activation(self.linear(self.dropout(x)))


class LinearFeedforward(nn.Module):

    def __init__(self, d_in, d_hid, d_out, activation='relu'):
        super().__init__()
        self.feedforward = Feedforward(d_in, d_hid, activation=activation)
        self.linear = Linear(d_hid, d_out)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        return self.dropout(self.linear(self.feedforward(x)))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_in': 4, 'd_hid': 4, 'd_out': 4}]
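A minimal forward-pass sketch, assuming stock PyTorch. The module is switched to eval() so both dropout layers are identities and the output is deterministic; notably, the compiled kernels in this record contain no dropout op, which is consistent with tracing in eval mode:

import torch

torch.manual_seed(0)
mlp = LinearFeedforward(d_in=4, d_hid=4, d_out=4).eval()
x = torch.rand([4, 4, 4, 4])
with torch.no_grad():
    y = mlp(x)
# Linear flattens every leading dim and restores it, so the shape is preserved
assert y.shape == x.shape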
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from torch.nn import functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_3, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3 class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Feedforward(nn.Module): def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2): super().__init__() if activation is not None: self.activation = getattr(F, activation) else: self.activation = lambda x: x self.linear = Linear(d_in, d_out, bias=bias) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.activation(self.linear(self.dropout(x))) class LinearFeedforwardNew(nn.Module): def __init__(self, d_in, d_hid, d_out, activation='relu'): super().__init__() self.feedforward = Feedforward(d_in, d_hid, activation=activation) self.linear = Linear(d_hid, d_out) self.dropout = nn.Dropout(0.2) def forward(self, input_0): primals_2 = self.feedforward.linear.weight primals_3 = self.feedforward.linear.bias primals_4 = self.linear.weight primals_5 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
cristipp/decaNLP
LinearFeedforward
false
12,230
[ "BSD-3-Clause" ]
0
db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
https://github.com/cristipp/decaNLP/tree/db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
ConvP4
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/7v/c7vi5wilxrt357uxtvmptf6pv2gumzfirbj2dqvghsfxblam6qqu.py # Topologically Sorted Source Nodes: [roll, roll_2], Original ATen: [aten.roll] # Source node to ATen node mapping: # roll => iota # roll_2 => add_2, fmod_2 # Graph fragment: # %iota : [num_users=4] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%iota, 2), kwargs = {}) # %fmod_2 : [num_users=2] = call_function[target=torch.ops.aten.fmod.Scalar](args = (%add_2, 4), kwargs = {}) triton_poi_fused_roll_0 = async_compile.triton('triton_poi_fused_roll_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_roll_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_roll_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = (2 + x0) % 4 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uc/cuc5fjmhkopemtu747jj7izse5wnwx55cifyf6tvxzsmpk6w7deo.py # Topologically Sorted Source Nodes: [roll, roll_3], Original ATen: [aten.roll] # Source node to ATen node mapping: # roll => iota # roll_3 => add_3, fmod_3 # Graph fragment: # %iota : [num_users=4] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%iota, 1), kwargs = {}) # %fmod_3 : [num_users=2] = call_function[target=torch.ops.aten.fmod.Scalar](args = (%add_3, 4), kwargs = {}) triton_poi_fused_roll_1 = async_compile.triton('triton_poi_fused_roll_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_roll_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_roll_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = (1 + x0) % 4 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/a4/ca45n7a2ofberryhgkwdt3e6ifozz6kobcsqnv5anoirlakre2wq.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_1, %view_2, %view_3, %view_4], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 16) % 64 x3 = (xindex // 1024) x4 = xindex % 16 x0 = xindex % 4 x1 = (xindex // 4) % 4 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 16, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + (16*(x2 % 16)) + (256*x3)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 32, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (3 + ((-1)*x1) + (4*x0) + (16*((3 + (((-16) + x2) % 4)) % 4)) + (64*((((-16) + x2) // 4) % 4)) + (256*x3)), tmp9, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 48, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (15 + ((-1)*x4) + (16*(tl.where(((2 + (((-32) + x2) % 4)) % 4) < 0, 4 + ((2 + (((-32) + x2) % 4)) % 4), (2 + (((-32) + x2) % 4)) % 4))) + (64*((((-32) + x2) // 4) % 4)) + (256*x3)), tmp14, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 64, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (12 + x1 + ((-4)*x0) + (16*(tl.where(((1 + (((-48) + x2) % 4)) % 4) < 0, 4 + ((1 + (((-48) + x2) % 4)) % 4), (1 + (((-48) + x2) % 4)) % 4))) + (64*((((-48) + x2) // 4) % 4)) + (256*x3)), tmp16, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x5), tmp22, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gx/cgxwdmeiaiuo4ukxyhrapaowlicnvf5vihr64qidtjvjq4idwboo.py # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.add] # Source node to ATen node mapping: # y_2 => add_4 # Graph fragment: # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %view_7), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 36) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [roll, roll_2], Original ATen: [aten.roll] stream0 = get_raw_stream(0) triton_poi_fused_roll_0.run(buf0, 4, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [roll, roll_3], Original ATen: [aten.roll] triton_poi_fused_roll_1.run(buf1, 4, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(primals_2, buf2, 4096, grid=grid(4096), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (16, 16, 4, 4), (256, 16, 4, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 16, 3, 3), (144, 9, 3, 1)) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 3, 3), (144, 36, 9, 3, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf4, primals_3, 576, grid=grid(576), stream=stream0) del primals_3 return (buf4, reinterpret_tensor(primals_1, (4, 16, 4, 4), (256, 16, 4, 1), 0), buf0, buf1, reinterpret_tensor(buf2, (16, 16, 4, 4), (256, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, 
times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


def _grot90(x, k):
    return torch.rot90(x.roll(k, 2), k, (3, 4))


class ConvP4(torch.nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, bias=True,
            stride=1, padding=1):
        super().__init__()
        w = torch.empty(out_channels, in_channels, 4, kernel_size, kernel_size)
        self.weight = torch.nn.Parameter(w)
        torch.nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(out_channels))
        else:
            self.bias = None
        self.stride = stride
        self.padding = padding

    def _rotated(self, w):
        ws = [_grot90(w, k).view(w.size(0), -1, w.size(3), w.size(4))
            for k in range(4)]
        return torch.cat(ws, 1).view(4 * w.size(0), 4 * w.size(1),
            w.size(3), w.size(4))

    def forward(self, x):
        x = x.view(x.size(0), -1, x.size(3), x.size(4))
        w = self._rotated(self.weight)
        y = torch.nn.functional.conv2d(x, w, stride=self.stride,
            padding=self.padding)
        y = y.view(y.size(0), -1, 4, y.size(2), y.size(3))
        if self.bias is not None:
            y = y + self.bias.view(1, -1, 1, 1, 1)
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
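A shape-level sketch of the layer above, assuming stock PyTorch: the (N, C_in, 4, H, W) input is flattened, convolved against the four stacked rotations of the filter bank, and reshaped back to (N, C_out, 4, H', W'). With kernel_size=4, padding=1, stride=1, a 4x4 grid shrinks to 3x3, which matches the (4, 4, 4, 3, 3) output buffer in the compiled code:

import torch

torch.manual_seed(0)
conv = ConvP4(in_channels=4, out_channels=4, kernel_size=4)
x = torch.rand([4, 4, 4, 4, 4])
y = conv(x)
# H' = (H + 2*padding - kernel_size) // stride + 1 = (4 + 2 - 4) + 1 = 3
assert y.shape == (4, 4, 4, 3, 3)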
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_roll_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = (2 + x0) % 4 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_roll_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = (1 + x0) % 4 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 16 % 64 x3 = xindex // 1024 x4 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 16, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 16 * (x2 % 16) + 256 * x3), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 32, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (3 + -1 * x1 + 4 * x0 + 16 * ((3 + (-16 + x2) % 4) % 4) + 64 * ((-16 + x2) // 4 % 4) + 256 * x3), tmp9, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 48, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (15 + -1 * x4 + 16 * tl.where((2 + (-32 + x2) % 4) % 4 < 0, 4 + (2 + (-32 + x2) % 4) % 4, (2 + (-32 + x2) % 4) % 4) + 64 * ((-32 + x2) // 4 % 4) + 256 * x3), tmp14, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 64, tl.int64) tmp19 = tl.load(in_ptr0 + (12 + x1 + -4 * x0 + 16 * tl.where((1 + (-48 + x2) % 4) % 4 < 0, 4 + (1 + (-48 + x2) % 4) % 4, (1 + (-48 + x2) % 4 ) % 4) + 64 * ((-48 + x2) // 4 % 4) + 256 * x3), tmp16, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x5, tmp22, None) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 36 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_poi_fused_roll_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_roll_1[grid(4)](buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_2[grid(4096)](primals_2, buf2, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (16, 16, 4, 4), (256, 16, 4, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 16, 3, 3), (144, 9, 3, 1)) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 3, 3), (144, 36, 9, 3, 1), 0) del buf3 triton_poi_fused_add_3[grid(576)](buf4, primals_3, 576, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf4, reinterpret_tensor(primals_1, (4, 16, 4, 4), (256, 16, 4, 1), 0), buf0, buf1, reinterpret_tensor(buf2, (16, 16, 4, 4), (256, 16, 4, 1), 0) def _grot90(x, k): return torch.rot90(x.roll(k, 2), k, (3, 4)) class ConvP4New(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, stride=1, padding=1): super().__init__() w = torch.empty(out_channels, in_channels, 4, kernel_size, kernel_size) self.weight = torch.nn.Parameter(w) torch.nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) else: self.bias = None self.stride = stride self.padding = padding def _rotated(self, w): ws = [_grot90(w, k).view(w.size(0), -1, w.size(3), w.size(4)) for k in range(4)] return torch.cat(ws, 1).view(4 * w.size(0), 4 * w.size(1), w.size(3 ), w.size(4)) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
claudio-unipv/groupcnn
ConvP4
false
12,231
[ "MIT" ]
0
2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
https://github.com/claudio-unipv/groupcnn/tree/2b1514f5a0fb9a78c6f646e1c075e5c3d5af9c0c
Attention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py # Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.clone] # Source node to ATen node mapping: # dot_products => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, 
xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/eh/ceheq5ns3kg3p6tebb47gdy475c5v4keklf245jl4fgbpvugznm5.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => 
div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1) del arg1_1 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = 
reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4) del arg2_1 del buf3 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
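# Illustrative usage sketch (not part of the generated file): running this
# AOT wrapper directly benchmarks the compiled graph on random strided CUDA
# inputs via compiled_module_main. The call() entry point can also be driven
# by hand; note that it clears its argument list in place:
#
#     inputs = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3)]
#     out, = call(inputs)   # returns a 1-tuple; inputs is empty afterwards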
import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data

# Large masking constant; the upstream decaNLP source defines it at module
# scope (INF = 1e10), without which the causal/padding branches below raise
# NameError.
INF = 1e10


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class Attention(nn.Module):

    def __init__(self, d_key, dropout_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(dropout_ratio)
        self.causal = causal

    def forward(self, query, key, value, padding=None):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF
            dot_products.data.sub_(tri.unsqueeze(0))
        if padding is not None:
            dot_products.data.masked_fill_(
                padding.unsqueeze(1).expand_as(dot_products), -INF)
        return matmul(
            self.dropout(F.softmax(dot_products / self.scale, dim=-1)), value)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_key': 4, 'dropout_ratio': 0.5, 'causal': 4}]
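# Illustrative sketch (not part of the original entry): the fused kernels in
# the AOT code above compute softmax(dot_products / scale) in the numerically
# stable form exp((x - max x) / scale) / sum(...); with d_key = 4 the factor
# 1 / scale = 0.5 appears directly in triton_poi_fused__softmax_1. A
# plain-PyTorch equivalent of that decomposition:
import torch


def stable_scaled_softmax(x, scale):
    # subtract the row max before dividing by scale and exponentiating,
    # exactly as the fused kernel does
    shifted = (x - x.amax(dim=-1, keepdim=True)) / scale
    e = shifted.exp()
    return e / e.sum(dim=-1, keepdim=True)


_x = torch.randn(4, 4)
assert torch.allclose(stable_scaled_softmax(_x, 2.0),
                      torch.softmax(_x / 2.0, dim=-1), atol=1e-6)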
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out =buf1) del arg1_1 buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4 ) del arg2_1 del buf3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class AttentionNew(nn.Module): def __init__(self, d_key, dropout_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(dropout_ratio) self.causal = causal def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
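# Illustrative equivalence check (an assumption-laden sketch, not part of the
# entry): the captured graph contains no dropout node, so AttentionNew matches
# the eager Attention module from the reference code only with dropout
# inactive, i.e. in eval() mode, and only on the non-causal 4-D path that was
# traced. Assumes a CUDA device and both classes importable side by side:
#
#     q, k, v = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
#     ref = Attention(d_key=4, dropout_ratio=0.5, causal=False).eval()
#     new = AttentionNew(d_key=4, dropout_ratio=0.5, causal=False).eval()
#     torch.testing.assert_close(new(q, k, v), ref(q, k, v))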
cristipp/decaNLP
Attention
false
12,232
[ "BSD-3-Clause" ]
0
db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
https://github.com/cristipp/decaNLP/tree/db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
MultiHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => exp # Graph fragment: # %mul_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_3, [-1], True), kwargs = {}) # %sub_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_3, %amax_default_3), kwargs = {}) # %div_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_3, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_3,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mk/cmkim2hc4ksxhatli3y5cu7hoqofxcbzqrrxvnlhmswdt4cgww25.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %bmm_3, %bmm_5, %bmm_7], -1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr3 + (x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6) buf7 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [dot_products_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm] extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10) buf11 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [dot_products_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf11, buf12, 64, grid=grid(64), stream=stream0) buf13 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0) buf14 = empty_strided_cuda((4, 4, 
1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm] extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14) buf15 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [dot_products_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf15, buf16, 64, grid=grid(64), stream=stream0) buf17 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf16, buf17, 64, grid=grid(64), stream=stream0) buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm] extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18) buf19 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf6, buf10, buf14, buf18, buf19, 64, grid=grid(64), stream=stream0) del buf10 del buf14 del buf18 del buf6 return (buf19, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), buf5, buf9, buf13, buf17, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data

# Large masking constant; the upstream decaNLP source defines it at module
# scope (INF = 1e10), without which the causal/padding branches below raise
# NameError.
INF = 1e10


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class Linear(nn.Linear):

    def forward(self, x):
        size = x.size()
        return super().forward(x.contiguous().view(-1, size[-1])).view(
            *size[:-1], -1)


class Attention(nn.Module):

    def __init__(self, d_key, dropout_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(dropout_ratio)
        self.causal = causal

    def forward(self, query, key, value, padding=None):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF
            dot_products.data.sub_(tri.unsqueeze(0))
        if padding is not None:
            dot_products.data.masked_fill_(
                padding.unsqueeze(1).expand_as(dot_products), -INF)
        return matmul(
            self.dropout(F.softmax(dot_products / self.scale, dim=-1)), value)


class MultiHead(nn.Module):

    def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
        super().__init__()
        self.attention = Attention(d_key, dropout_ratio, causal=causal)
        self.wq = Linear(d_key, d_key, bias=False)
        self.wk = Linear(d_key, d_key, bias=False)
        self.wv = Linear(d_value, d_value, bias=False)
        self.n_heads = n_heads

    def forward(self, query, key, value, padding=None):
        query, key, value = self.wq(query), self.wk(key), self.wv(value)
        query, key, value = (x.chunk(self.n_heads, -1) for x in
            (query, key, value))
        return torch.cat([self.attention(q, k, v, padding=padding) for
            q, k, v in zip(query, key, value)], -1)


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d_key': 4, 'd_value': 4, 'n_heads': 4,
        'dropout_ratio': 0.5}]
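# Illustrative sketch (not part of the original entry): with d_key = 4 and
# n_heads = 4, chunk(n_heads, -1) splits each projection into four width-1
# head slices. n_heads is a Python constant at trace time, which is why the
# compiled call() below unrolls into four bmm/softmax/bmm pipelines over
# (4, 4, 1) x (4, 1, 4) slices before a single cat kernel:
import torch

_q = torch.rand(4, 4, 4)
_heads = _q.chunk(4, -1)                       # four (4, 4, 1) tensors
assert len(_heads) == 4 and _heads[0].shape == (4, 4, 1)
assert torch.equal(torch.cat(_heads, -1), _q)  # cat is the inverse of chunk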
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn from torch.nn import functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 
1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6) buf7 = buf4 del buf4 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10) buf11 = buf8 del buf8 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14) buf15 = buf12 del buf12 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = buf15 del buf15 triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18) buf19 = buf16 del buf16 triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf14 del buf18 del buf6 return buf19, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), 
reinterpret_tensor(primals_5, (16, 4), (4, 1), 0
        ), buf5, buf9, buf13, buf17, reinterpret_tensor(buf2, (4, 1, 4),
        (16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3
        ), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3
        ), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2
        ), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2
        ), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2
        ), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1
        ), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1
        ), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1
        ), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0
        ), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0
        ), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0)


# Large masking constant; defined here (as in the upstream decaNLP source,
# INF = 1e10) so Attention.forward does not raise NameError on its masking
# branches.
INF = 1e10


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class Linear(nn.Linear):

    def forward(self, x):
        size = x.size()
        return super().forward(x.contiguous().view(-1, size[-1])).view(
            *size[:-1], -1)


class Attention(nn.Module):

    def __init__(self, d_key, dropout_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(dropout_ratio)
        self.causal = causal

    def forward(self, query, key, value, padding=None):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF
            dot_products.data.sub_(tri.unsqueeze(0))
        if padding is not None:
            dot_products.data.masked_fill_(
                padding.unsqueeze(1).expand_as(dot_products), -INF)
        return matmul(
            self.dropout(F.softmax(dot_products / self.scale, dim=-1)), value)


class MultiHeadNew(nn.Module):

    def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
        super().__init__()
        self.attention = Attention(d_key, dropout_ratio, causal=causal)
        self.wq = Linear(d_key, d_key, bias=False)
        self.wk = Linear(d_key, d_key, bias=False)
        self.wv = Linear(d_value, d_value, bias=False)
        self.n_heads = n_heads

    def forward(self, input_0, input_1, input_2):
        primals_2 = self.wq.weight
        primals_4 = self.wk.weight
        primals_6 = self.wv.weight
        primals_1 = input_0
        primals_3 = input_1
        primals_5 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
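# Illustrative usage sketch (assumptions: a CUDA device, eval() mode so the
# absent dropout node is irrelevant, and MultiHead from the reference code
# importable alongside MultiHeadNew; not part of the entry):
#
#     q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
#     mh = MultiHeadNew(d_key=4, d_value=4, n_heads=4, dropout_ratio=0.5).cuda()
#     ref = MultiHead(4, 4, 4, 0.5).cuda()
#     ref.load_state_dict(mh.state_dict())   # share the projection weights
#     torch.testing.assert_close(mh.eval()(q, k, v), ref.eval()(q, k, v))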
cristipp/decaNLP
MultiHead
false
12,233
[ "BSD-3-Clause" ]
0
db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
https://github.com/cristipp/decaNLP/tree/db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
CaffeNormalize
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/yt/cytrrfad2cx3uy3zkm7m3c4iejunfa6gq5sz2n6hdlhtvk7byu55.py # Topologically Sorted Source Nodes: [norm, add, x, mul], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # norm => pow_1, pow_2, sum_1 # x => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-07), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view), kwargs = {}) triton_poi_fused_add_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': 
True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp15 * tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, add, x, mul], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn


class CaffeNormalize(nn.Module):

    def __init__(self, features, eps=1e-07):
        super(CaffeNormalize, self).__init__()
        self.scale = nn.Parameter(10.0 * torch.ones(features))
        self.eps = eps

    def forward(self, x):
        x_size = x.size()
        norm = x.norm(2, dim=1, keepdim=True)
        x = x.div(norm + self.eps)
        return x.mul(self.scale.view(1, x_size[1], 1, 1))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'features': 4}]
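# Illustrative sketch (not part of the original entry): the single fused
# kernel in the AOT code above computes x / (||x||_2 + eps) * scale in one
# pass over the channel dimension; note that eps is added to the norm after
# the square root (matching the sqrt-then-add sequence in the kernel), not
# inside it:
import torch

_x = torch.rand(4, 4, 4, 4)
_scale = 10.0 * torch.ones(4)
_norm = _x.pow(2).sum(dim=1, keepdim=True).sqrt()
_out = _x / (_norm + 1e-07) * _scale.view(1, 4, 1, 1)
assert torch.allclose(
    _out,
    _x.div(_x.norm(2, dim=1, keepdim=True) + 1e-07)
      .mul(_scale.view(1, 4, 1, 1)))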
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp15 * tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class CaffeNormalizeNew(nn.Module): def __init__(self, features, eps=1e-07): super(CaffeNormalizeNew, self).__init__() self.scale = nn.Parameter(10.0 * torch.ones(features)) self.eps = eps def forward(self, input_0): primals_2 = self.scale primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
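# Illustrative usage sketch (assumes a CUDA device and CaffeNormalize from
# the reference code importable alongside; not part of the entry). Because
# the fused kernel involves no randomness, CaffeNormalizeNew is a drop-in
# replacement for the eager module once the scale parameter is shared:
#
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     m = CaffeNormalizeNew(features=4).cuda()
#     ref = CaffeNormalize(features=4).cuda()
#     ref.load_state_dict(m.state_dict())
#     torch.testing.assert_close(m(x), ref(x))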
cynthiamao98/DepthAwareCNN
CaffeNormalize
false
12,234
[ "MIT" ]
0
824cffaa4159e3dc7cc251a4a659e35c437bb92c
https://github.com/cynthiamao98/DepthAwareCNN/tree/824cffaa4159e3dc7cc251a4a659e35c437bb92c
TransformerEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => exp # Graph fragment: # %mul_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_3, [-1], True), kwargs = {}) # %sub_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_3, %amax_default_3), kwargs = {}) # %div_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_3, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_3,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mk/cmkim2hc4ksxhatli3y5cu7hoqofxcbzqrrxvnlhmswdt4cgww25.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %bmm_3, %bmm_5, %bmm_7], -1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr3 + (x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7f/c7fwok6q7j5rvjs3ob32s2cth5xjbedhynzb5ozchylog57bhmxv.py # Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std] # Source node to ATen node mapping: # add => add # mean => mean # std => var # Graph fragment: # %add : [num_users=3] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %cat), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add, [-1]), kwargs = {correction: 1.0, keepdim: True}) triton_poi_fused_add_mean_std_3 = async_compile.triton('triton_poi_fused_add_mean_std_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_std_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + (x0), tmp29, xmask) tl.store(out_ptr0 + (x0), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/dw/cdwd24bmovp4kvuenv3jq6ffpahgl34iziauouexc57lxivmzubp.py # Topologically Sorted Source Nodes: [add, mean, std, sub, mul, add_1, truediv_4, add_2], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.mul, aten.div] # Source node to 
ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # mean => mean # mul => mul # std => sqrt # sub => sub_4 # truediv_4 => div_8 # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %cat), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %sub_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_8, %primals_6), kwargs = {}) triton_poi_fused_add_div_mean_mul_std_sub_4 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x2), xmask) tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tmp0 * tmp5 tmp8 = libdevice.sqrt(tmp7) tmp9 = 1e-06 tmp10 = tmp8 + tmp9 tmp11 = tmp6 / tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/va/cvayouropyisaprtjrhemadbdvsels72axdjsrgmbayknhu335yc.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_31,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/dg/cdg2dxfjk7prchu44e4cgkid2y4524hl5vpyijgt6dwrnsrwzz2k.py # Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add] # Source node to ATen node mapping: # add_3 => add_3 # Graph fragment: # %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_33), kwargs = {}) triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/j4/cj4wrybpym5umgwi5ropl654n64ptcknq2hunhzirmo6b5jmhqyj.py # Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_4, truediv_5, add_5], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] # Source node to ATen node mapping: # add_4 => add_4 # add_5 => add_5 # mean_2 => mean_1 # mul_1 => mul_1 # std_2 => sqrt_1, var_1 # sub_1 => sub_5 # truediv_5 => div_9 # Graph fragment: # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_3, [-1], True), kwargs = {}) # %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add_3, [-1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var_1,), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %mean_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %sub_5), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_1, 1e-06), kwargs = {}) # %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_4), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_9, %primals_12), kwargs = {}) triton_poi_fused_add_div_mean_mul_std_sub_7 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = 
empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6) buf7 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [dot_products_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm] extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10) buf11 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [dot_products_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf11, buf12, 64, grid=grid(64), stream=stream0) buf13 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm] extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14) buf15 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [dot_products_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf15, buf16, 64, grid=grid(64), stream=stream0) buf17 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [softmax_3], Original 
ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf16, buf17, 64, grid=grid(64), stream=stream0) buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm] extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18) buf19 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf6, buf10, buf14, buf18, buf19, 64, grid=grid(64), stream=stream0) del buf10 del buf14 buf20 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0); del buf6 # reuse buf21 = buf20; del buf20 # reuse buf22 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std] triton_poi_fused_add_mean_std_3.run(buf21, primals_1, buf19, buf22, 16, grid=grid(16), stream=stream0) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, mean, std, sub, mul, add_1, truediv_4, add_2], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.mul, aten.div] triton_poi_fused_add_div_mean_mul_std_sub_4.run(primals_5, primals_1, buf19, buf22, buf21, primals_6, buf23, 64, grid=grid(64), stream=stream0) del buf21 del buf22 del primals_6 buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf23, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf24) buf25 = reinterpret_tensor(buf24, (4, 4, 4), (16, 4, 1), 0); del buf24 # reuse buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_5.run(buf25, primals_8, buf29, 64, grid=grid(64), stream=stream0) del primals_8 buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf26) buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0); del buf26 # reuse # Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add] triton_poi_fused_add_6.run(buf27, buf23, primals_10, 64, grid=grid(64), stream=stream0) del primals_10 buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_4, truediv_5, add_5], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] triton_poi_fused_add_div_mean_mul_std_sub_7.run(primals_11, buf27, primals_12, buf28, 64, grid=grid(64), stream=stream0) del primals_12 return (buf28, primals_1, primals_5, primals_11, buf5, buf9, buf13, buf17, buf19, reinterpret_tensor(buf23, (16, 4), (4, 1), 0), reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf27, primals_9, buf29, primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 
0), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
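# Editor's note (added sketch, not part of the original dump): the fused
# add/mean/std kernels above implement residual-add followed by LayerNorm with
# an *unbiased* variance (the division by 3.0 is N-1 for the size-4 feature
# dimension), and eps is added to the sqrt of the variance, not to the
# variance itself. A minimal eager-mode equivalent, with stand-in names for
# primals_1 (input), the concatenated attention output, and gamma/beta:
import torch

x = torch.rand(4, 4, 4)          # stands in for primals_1
attn_out = torch.rand(4, 4, 4)   # stands in for the cat of the four heads
gamma, beta = torch.ones(4), torch.zeros(4)
h = x + attn_out
mean = h.mean(-1, keepdim=True)
std = h.std(-1, keepdim=True)    # unbiased std: squared deviations / (4 - 1)
out = gamma * (h - mean) / (std + 1e-06) + beta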
import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data

INF = 1e10  # large finite mask value for attention logits; undefined in the original source


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class Linear(nn.Linear):

    def forward(self, x):
        size = x.size()
        return super().forward(x.contiguous().view(-1, size[-1])).view(*size[:-1], -1)


class Feedforward(nn.Module):

    def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):
        super().__init__()
        if activation is not None:
            self.activation = getattr(F, activation)
        else:
            self.activation = lambda x: x
        self.linear = Linear(d_in, d_out, bias=bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.activation(self.linear(self.dropout(x)))


class LinearReLU(nn.Module):

    def __init__(self, d_model, d_hidden):
        super().__init__()
        self.feedforward = Feedforward(d_model, d_hidden, activation='relu')
        self.linear = Linear(d_hidden, d_model)

    def forward(self, x, padding=None):
        return self.linear(self.feedforward(x))


class Attention(nn.Module):

    def __init__(self, d_key, dropout_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(dropout_ratio)
        self.causal = causal

    def forward(self, query, key, value, padding=None):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF
            dot_products.data.sub_(tri.unsqueeze(0))
        if padding is not None:
            dot_products.data.masked_fill_(padding.unsqueeze(1).expand_as(dot_products), -INF)
        return matmul(self.dropout(F.softmax(dot_products / self.scale, dim=-1)), value)


class MultiHead(nn.Module):

    def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
        super().__init__()
        self.attention = Attention(d_key, dropout_ratio, causal=causal)
        self.wq = Linear(d_key, d_key, bias=False)
        self.wk = Linear(d_key, d_key, bias=False)
        self.wv = Linear(d_value, d_value, bias=False)
        self.n_heads = n_heads

    def forward(self, query, key, value, padding=None):
        query, key, value = self.wq(query), self.wk(key), self.wv(value)
        query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key, value))
        return torch.cat([self.attention(q, k, v, padding=padding)
                          for q, k, v in zip(query, key, value)], -1)


class LayerNorm(nn.Module):

    def __init__(self, d_model, eps=1e-06):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(d_model))
        self.beta = nn.Parameter(torch.zeros(d_model))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta


class ResidualBlock(nn.Module):

    def __init__(self, layer, d_model, dropout_ratio):
        super().__init__()
        self.layer = layer
        self.dropout = nn.Dropout(dropout_ratio)
        self.layernorm = LayerNorm(d_model)

    def forward(self, *x, padding=None):
        return self.layernorm(x[0] + self.dropout(self.layer(*x, padding=padding)))


class TransformerEncoderLayer(nn.Module):

    def __init__(self, dimension, n_heads, hidden, dropout):
        super().__init__()
        self.selfattn = ResidualBlock(MultiHead(dimension, dimension,
            n_heads, dropout), dimension, dropout)
        self.feedforward = ResidualBlock(LinearReLU(dimension, hidden),
            dimension, dropout)

    def forward(self, x, padding=None):
        return self.feedforward(self.selfattn(x, x, x, padding=padding))


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dimension': 4, 'n_heads': 4, 'hidden': 4, 'dropout': 0.5}]
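# Added example: exercising the reference module with its bundled helpers
# (eval() disables the p=0.5 dropout layers for a deterministic output):
args, kwargs = get_init_inputs()
layer = TransformerEncoderLayer(*args, **kwargs).eval()
x, = get_inputs()
print(layer(x).shape)  # torch.Size([4, 4, 4])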
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
from torch.nn import functional as F
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

INF = 1e10  # mask value used by Attention below; undefined in the original source


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.5
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x2, tmp17, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 3, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 4, tl.int64)
    tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy='evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)


@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = 3.0
    tmp29 = tmp27 / tmp28
    tl.store(in_out_ptr0 + x0, tmp29, xmask)
    tl.store(out_ptr0 + x0, tmp16, xmask)


@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x2, xmask)
    tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 - tmp4
    tmp6 = tmp0 * tmp5
    tmp8 = libdevice.sqrt(tmp7)
    tmp9 = 1e-06
    tmp10 = tmp8 + tmp9
    tmp11 = tmp6 / tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = 4.0
    tmp10 = tmp8 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp0 * tmp11
    tmp13 = tmp2 - tmp10
    tmp14 = tmp13 * tmp13
    tmp15 = tmp3 - tmp10
    tmp16 = tmp15 * tmp15
    tmp17 = tmp14 + tmp16
    tmp18 = tmp5 - tmp10
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp10
    tmp22 = tmp21 * tmp21
    tmp23 = tmp20 + tmp22
    tmp24 = 3.0
    tmp25 = tmp23 / tmp24
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = 1e-06
    tmp28 = tmp26 + tmp27
    tmp29 = tmp12 / tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
        ) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        del primals_4
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0),
            reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf5 = buf3
        del buf3
        triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16,
            4, 1), 0), out=buf6)
        buf7 = buf4
        del buf4
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1),
            reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
        buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf9 = buf7
        del buf7
        triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16,
            4, 1), 1), out=buf10)
        buf11 = buf8
        del buf8
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2),
            reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
        buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf13 = buf11
        del buf11
        triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16,
            4, 1), 2), out=buf14)
        buf15 = buf12
        del buf12
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3),
            reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
        buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf17 = buf15
        del buf15
        triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16,
            4, 1), 3), out=buf18)
        buf19 = buf16
        del buf16
        triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf10
        del buf14
        buf20 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0)
        del buf6
        buf21 = buf20
        del buf20
        buf22 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0)
        del buf18
        triton_poi_fused_add_mean_std_3[grid(16)](buf21, primals_1, buf19,
            buf22, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_5,
            primals_1, buf19, buf22, buf21, primals_6, buf23, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf21
        del buf22
        del primals_6
        buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf23, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf24)
        buf25 = reinterpret_tensor(buf24, (4, 4, 4), (16, 4, 1), 0)
        del buf24
        buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_5[grid(64)](buf25,
            primals_8, buf29, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_8
        buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf26)
        buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0)
        del buf26
        triton_poi_fused_add_6[grid(64)](buf27, buf23, primals_10, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_10
        buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_11,
            buf27, primals_12, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_12
    return (buf28, primals_1, primals_5, primals_11, buf5, buf9, buf13,
        buf17, buf19, reinterpret_tensor(buf23, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf27, primals_9,
        buf29, primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3),
        reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2),
        reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1),
        reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0))


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class Linear(nn.Linear):

    def forward(self, x):
        size = x.size()
        return super().forward(x.contiguous().view(-1, size[-1])).view(*size[:-1], -1)


class Feedforward(nn.Module):

    def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):
        super().__init__()
        if activation is not None:
            self.activation = getattr(F, activation)
        else:
            self.activation = lambda x: x
        self.linear = Linear(d_in, d_out, bias=bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.activation(self.linear(self.dropout(x)))


class LinearReLU(nn.Module):

    def __init__(self, d_model, d_hidden):
        super().__init__()
        self.feedforward = Feedforward(d_model, d_hidden, activation='relu')
        self.linear = Linear(d_hidden, d_model)

    def forward(self, x, padding=None):
        return self.linear(self.feedforward(x))


class Attention(nn.Module):

    def __init__(self, d_key, dropout_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(dropout_ratio)
        self.causal = causal

    def forward(self, query, key, value, padding=None):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF
            dot_products.data.sub_(tri.unsqueeze(0))
        if padding is not None:
            dot_products.data.masked_fill_(padding.unsqueeze(1).expand_as(dot_products), -INF)
        return matmul(self.dropout(F.softmax(dot_products / self.scale, dim=-1)), value)


class MultiHead(nn.Module):

    def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
        super().__init__()
        self.attention = Attention(d_key, dropout_ratio, causal=causal)
        self.wq = Linear(d_key, d_key, bias=False)
        self.wk = Linear(d_key, d_key, bias=False)
        self.wv = Linear(d_value, d_value, bias=False)
        self.n_heads = n_heads

    def forward(self, query, key, value, padding=None):
        query, key, value = self.wq(query), self.wk(key), self.wv(value)
        query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key, value))
        return torch.cat([self.attention(q, k, v, padding=padding)
                          for q, k, v in zip(query, key, value)], -1)


class LayerNorm(nn.Module):

    def __init__(self, d_model, eps=1e-06):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(d_model))
        self.beta = nn.Parameter(torch.zeros(d_model))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta


class ResidualBlock(nn.Module):

    def __init__(self, layer, d_model, dropout_ratio):
        super().__init__()
        self.layer = layer
        self.dropout = nn.Dropout(dropout_ratio)
        self.layernorm = LayerNorm(d_model)

    def forward(self, *x, padding=None):
        return self.layernorm(x[0] + self.dropout(self.layer(*x, padding=padding)))


class TransformerEncoderLayerNew(nn.Module):

    def __init__(self, dimension, n_heads, hidden, dropout):
        super().__init__()
        self.selfattn = ResidualBlock(MultiHead(dimension, dimension,
            n_heads, dropout), dimension, dropout)
        self.feedforward = ResidualBlock(LinearReLU(dimension, hidden),
            dimension, dropout)

    def forward(self, input_0):
        primals_2 = self.selfattn.layer.wq.weight
        primals_3 = self.selfattn.layer.wk.weight
        primals_4 = self.selfattn.layer.wv.weight
        primals_5 = self.selfattn.layernorm.gamma
        primals_6 = self.selfattn.layernorm.beta
        primals_7 = self.feedforward.layer.feedforward.linear.weight
        primals_8 = self.feedforward.layer.feedforward.linear.bias
        primals_9 = self.feedforward.layer.linear.weight
        primals_10 = self.feedforward.layer.linear.bias
        primals_11 = self.feedforward.layernorm.gamma
        primals_12 = self.feedforward.layernorm.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12])
        return output[0]
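# Plausible equivalence check between the eager and compiled modules (added
# sketch; assumes both class definitions are in scope and a CUDA device is
# available -- the traced `call` omits dropout, so compare in eval mode):
torch.manual_seed(0)
ref = TransformerEncoderLayer(4, 4, 4, 0.5).cuda().eval()
fast = TransformerEncoderLayerNew(4, 4, 4, 0.5).cuda().eval()
fast.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, device='cuda')
print(torch.allclose(ref(x), fast(x), atol=1e-5))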
cristipp/decaNLP
TransformerEncoderLayer
false
12235
[ "BSD-3-Clause" ]
0
db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
https://github.com/cristipp/decaNLP/tree/db64df36bf2b1b2ca6946aacf0ee7463ac80c4cb
GradLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/kh/ckhrdiofp2vnn7yvgh2v7jkbya25ekop52fplbescfphsy6di3w6.py # Topologically Sorted Source Nodes: [sub, abs_1, mean, sum_1], Original ATen: [aten.sub, aten.abs, aten.mean, aten.sum] # Source node to ATen node mapping: # abs_1 => abs_1 # mean => mean # sub => sub # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mean,), kwargs = {}) triton_per_fused_abs_mean_sub_sum_0 = async_compile.triton('triton_per_fused_abs_mean_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, abs_1, mean, sum_1], Original ATen: [aten.sub, aten.abs, aten.mean, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_abs_mean_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
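# Note (added): with 4*4*4*4 = 256 elements, the whole loss fits a single
# persistent-reduction program (RBLOCK = 256), and the outer torch.sum in the
# source is a no-op on the 0-dim mean -- which is why the kernel simply
# divides the summed |a - b| by 256 and no separate sum kernel is emitted.
# Eager sketch of the same reduction:
import torch

a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
assert torch.allclose((a - b).abs().sum() / 256.0, (a - b).abs().mean())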
import torch
import torch.nn as nn


class GradLoss(nn.Module):

    def __init__(self):
        super(GradLoss, self).__init__()

    def forward(self, grad_fake, grad_real):
        return torch.sum(torch.mean(torch.abs(grad_real - grad_fake)))


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_mean_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_mean_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class GradLossNew(nn.Module):

    def __init__(self):
        super(GradLossNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
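# Added cross-check sketch (assumes GradLoss and GradLossNew are both in
# scope; requires CUDA, since `call` pins device 0):
fake = torch.rand(4, 4, 4, 4, device='cuda')
real = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(GradLoss()(fake, real), GradLossNew()(fake, real)))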
d4l3k/crowds
GradLoss
false
12236
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
RMSE_log
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ad/cadjhctvyfojnn7diqrt33hosai3lxiqzixa3xwfvqphtub2drtg.py # Topologically Sorted Source Nodes: [log, log_1, sub, abs_1, pow_1, mean, loss], Original ATen: [aten.log, aten.sub, aten.abs, aten.pow, aten.mean, aten.sqrt] # Source node to ATen node mapping: # abs_1 => abs_1 # log => log # log_1 => log_1 # loss => sqrt # mean => mean # pow_1 => pow_1 # sub => sub # Graph fragment: # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {}) triton_per_fused_abs_log_mean_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_abs_log_mean_pow_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_log_mean_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 
'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_log_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = libdevice.sqrt(tmp11) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [log, log_1, sub, abs_1, pow_1, mean, loss], Original ATen: [aten.log, aten.sub, aten.abs, aten.pow, aten.mean, aten.sqrt] stream0 = get_raw_stream(0) triton_per_fused_abs_log_mean_pow_sqrt_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
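# Note (added): the fused kernel evaluates sqrt(mean((log(real) - log(fake))**2))
# in one persistent reduction; the launch order (arg1_1, arg0_1) puts `real`
# first, matching the source, and the abs is mathematically redundant once the
# difference is squared. Eager equivalent for reference (inputs must stay
# positive for finite logs; torch.rand in (0, 1) satisfies this):
import torch

fake, real = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = torch.sqrt(torch.mean((torch.log(real) - torch.log(fake)) ** 2))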
import torch
import torch.nn as nn
import torch.nn.functional as F


class RMSE_log(nn.Module):

    def __init__(self):
        super(RMSE_log, self).__init__()

    def forward(self, fake, real):
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.upsample(fake, size=(H, W), mode='bilinear')
        loss = torch.sqrt(torch.mean(torch.abs(torch.log(real) - torch.log(
            fake)) ** 2))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_log_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0,
        in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr1 + r0, None)
    tmp1 = tl_math.log(tmp0)
    tmp3 = tl_math.log(tmp2)
    tmp4 = tmp1 - tmp3
    tmp5 = tl_math.abs(tmp4)
    tmp6 = tmp5 * tmp5
    tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
    tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
    tmp10 = 256.0
    tmp11 = tmp9 / tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_log_mean_pow_sqrt_sub_0[grid(1)](buf1, arg1_1,
            arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class RMSE_logNew(nn.Module):

    def __init__(self):
        super(RMSE_logNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
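A quick numerical check for the entry above, as a minimal sketch that is not part of the dataset record. It assumes a CUDA device and that the RMSE_log and RMSE_logNew classes from the two listings above are in scope.

# Sanity-check sketch: the fused persistent-reduction kernel should agree
# with the eager module, since both compute
# sqrt(mean(|log(real) - log(fake)|**2)) over all 256 elements.
import torch

if torch.cuda.is_available():
    fake = torch.rand(4, 4, 4, 4, device='cuda')
    real = torch.rand(4, 4, 4, 4, device='cuda')
    eager = RMSE_log()(fake, real)
    fused = RMSE_logNew()(fake, real)
    assert torch.allclose(eager, fused, atol=1e-5)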
d4l3k/crowds
RMSE_log
false
12237
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
RMSE
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/na/cnazvpb3iyswompqfcfpwk3zj4izllqzganw4g6xl4j3rt4gy6q7.py
# Topologically Sorted Source Nodes: [mul, mul_1, sub, abs_1, pow_1, mean, loss], Original ATen: [aten.mul, aten.sub, aten.abs, aten.pow, aten.mean, aten.sqrt]
# Source node to ATen node mapping:
#   abs_1 => abs_1
#   loss => sqrt
#   mean => mean
#   mul => mul
#   mul_1 => mul_1
#   pow_1 => pow_1
#   sub => sub
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 10.0), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 10.0), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
#   %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {})
triton_per_fused_abs_mean_mul_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_abs_mean_mul_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_mul_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp3 = tl.load(in_ptr1 + (r0), None)
    tmp1 = 10.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp5 = tmp2 - tmp4
    tmp6 = tl_math.abs(tmp5)
    tmp7 = tmp6 * tmp6
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = 256.0
    tmp12 = tmp10 / tmp11
    tmp13 = libdevice.sqrt(tmp12)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [mul, mul_1, sub, abs_1, pow_1, mean, loss], Original ATen: [aten.mul, aten.sub, aten.abs, aten.pow, aten.mean, aten.sqrt]
        stream0 = get_raw_stream(0)
        triton_per_fused_abs_mean_mul_pow_sqrt_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class RMSE(nn.Module):

    def __init__(self):
        super(RMSE, self).__init__()

    def forward(self, fake, real):
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.upsample(fake, size=(H, W), mode='bilinear')
        loss = torch.sqrt(torch.mean(torch.abs(10.0 * real - 10.0 * fake) ** 2)
            )
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_ptr0,
        in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 10.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp5 = tmp2 - tmp4
    tmp6 = tl_math.abs(tmp5)
    tmp7 = tmp6 * tmp6
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = 256.0
    tmp12 = tmp10 / tmp11
    tmp13 = libdevice.sqrt(tmp12)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_mean_mul_pow_sqrt_sub_0[grid(1)](buf1, arg1_1,
            arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class RMSENew(nn.Module):

    def __init__(self):
        super(RMSENew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
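Because the 10.0 scale sits inside the squared absolute difference, it factors out of the square root, so this entry's loss is exactly ten times a plain RMSE. A minimal sketch of that identity, not part of the dataset record, assuming a CUDA device and the RMSENew class above:

# Sketch: the fused kernel's result should equal 10 * sqrt(mean((real - fake)**2)).
import torch

if torch.cuda.is_available():
    fake = torch.rand(4, 4, 4, 4, device='cuda')
    real = torch.rand(4, 4, 4, 4, device='cuda')
    fused = RMSENew()(fake, real)
    expected = 10.0 * torch.sqrt(torch.mean((real - fake) ** 2))
    assert torch.allclose(fused, expected, atol=1e-5)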
d4l3k/crowds
RMSE
false
12238
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
LayerNorm
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/j7/cj7iyvuy47goxi4xdxqava277l66j6vb5fgnfyrzispb5i2wbvv6.py
# Topologically Sorted Source Nodes: [mean, mean_1, sub, mul, std, std_1, add, truediv, add_1], Original ATen: [aten.mean, aten.repeat, aten.sub, aten.mul, aten.std, aten.add, aten.div]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   mean => mean
#   mean_1 => repeat
#   mul => mul
#   std => var
#   std_1 => repeat_1
#   sub => sub
#   truediv => div
# Graph fragment:
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [2]), kwargs = {})
#   %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%view_1, [1, 1, 4, 4]), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %repeat), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 1.0), kwargs = {})
#   %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view_2, [2]), kwargs = {correction: 1.0})
#   %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%view_3, [1, 1, 4, 4]), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%repeat_1, 1e-06), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 0.0), kwargs = {})
triton_per_fused_add_div_mean_mul_repeat_std_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_repeat_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[16, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_repeat_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_repeat_std_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp9 / tmp11
    tmp13 = tmp1 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tmp19 = 16.0
    tmp20 = tmp4 / tmp19
    tmp21 = tmp0 - tmp20
    tmp22 = 1.0
    tmp23 = tmp21 * tmp22
    tmp24 = 15.0
    tmp25 = tmp18 / tmp24
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = 1e-06
    tmp28 = tmp26 + tmp27
    tmp29 = tmp23 / tmp28
    tmp30 = 0.0
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr2 + (r1 + (16*x0)), tmp31, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mean, mean_1, sub, mul, std, std_1, add, truediv, add_1], Original ATen: [aten.mean, aten.repeat, aten.sub, aten.mul, aten.std, aten.add, aten.div]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_div_mean_mul_repeat_std_sub_0.run(arg0_1, buf4, 16, 16, grid=grid(16), stream=stream0)
        del arg0_1
    return (buf4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn


class LayerNorm(nn.Module):

    def __init__(self, features, eps=1e-06, gamma=1.0, beta=0.0,
            learnable=False):
        super(LayerNorm, self).__init__()
        if learnable:
            self.gamma = nn.Parameter(torch.ones(features))
            self.beta = nn.Parameter(torch.zeros(features))
        else:
            self.gamma = gamma
            self.beta = beta
        self.eps = eps

    def forward(self, x):
        x_size = x.size()
        mean = x.view(x_size[0], x_size[1], x_size[2] * x_size[3]).mean(2
            ).view(x_size[0], x_size[1], 1, 1).repeat(1, 1, x_size[2],
            x_size[3])
        std = x.view(x_size[0], x_size[1], x_size[2] * x_size[3]).std(2).view(
            x_size[0], x_size[1], 1, 1).repeat(1, 1, x_size[2], x_size[3])
        return self.gamma * (x - mean) / (std + self.eps) + self.beta


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'features': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_mean_mul_repeat_std_sub_0(in_ptr0, out_ptr2,
        xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp9 / tmp11
    tmp13 = tmp1 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tmp19 = 16.0
    tmp20 = tmp4 / tmp19
    tmp21 = tmp0 - tmp20
    tmp22 = 1.0
    tmp23 = tmp21 * tmp22
    tmp24 = 15.0
    tmp25 = tmp18 / tmp24
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = 1e-06
    tmp28 = tmp26 + tmp27
    tmp29 = tmp23 / tmp28
    tmp30 = 0.0
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp31, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_div_mean_mul_repeat_std_sub_0[grid(16)](arg0_1,
            buf4, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf4,


class LayerNormNew(nn.Module):

    def __init__(self, features, eps=1e-06, gamma=1.0, beta=0.0,
            learnable=False):
        super(LayerNormNew, self).__init__()
        if learnable:
            self.gamma = nn.Parameter(torch.ones(features))
            self.beta = nn.Parameter(torch.zeros(features))
        else:
            self.gamma = gamma
            self.beta = beta
        self.eps = eps

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
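One detail worth noting in the kernel above: the sum of squared deviations is divided by 15.0, that is N - 1 for the 16 elements of each (batch, channel) plane, because the eager module uses Tensor.std(), which is unbiased by default; nn.LayerNorm would use the biased variance instead. Also, since the module was traced with learnable=False, gamma=1.0 and beta=0.0 are baked into the kernel as constants (tmp22 and tmp30). A minimal eager re-derivation, not part of the dataset record, assuming a CUDA device and the LayerNormNew class above:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    flat = x.view(4, 4, 16)
    mean = flat.mean(2).view(4, 4, 1, 1)
    std = flat.std(2).view(4, 4, 1, 1)      # unbiased: divides by 15 = N - 1
    expected = (x - mean) / (std + 1e-06)   # gamma = 1.0, beta = 0.0
    fused = LayerNormNew(4)(x)
    assert torch.allclose(fused, expected, atol=1e-5)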
cynthiamao98/DepthAwareCNN
LayerNorm
false
12239
[ "MIT" ]
0
824cffaa4159e3dc7cc251a4a659e35c437bb92c
https://github.com/cynthiamao98/DepthAwareCNN/tree/824cffaa4159e3dc7cc251a4a659e35c437bb92c
L1
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/zq/czqqw3nxgbnqgkc6nsaeqcjv2pplknchzva4e4wbsoihrsva3dce.py
# Topologically Sorted Source Nodes: [mul, mul_1, sub, abs_1, loss], Original ATen: [aten.mul, aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
#   abs_1 => abs_1
#   loss => mean
#   mul => mul
#   mul_1 => mul_1
#   sub => sub
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 10.0), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 10.0), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
triton_per_fused_abs_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp3 = tl.load(in_ptr1 + (r0), None)
    tmp1 = 10.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp5 = tmp2 - tmp4
    tmp6 = tl_math.abs(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
    tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
    tmp10 = 256.0
    tmp11 = tmp9 / tmp10
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [mul, mul_1, sub, abs_1, loss], Original ATen: [aten.mul, aten.sub, aten.abs, aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_abs_mean_mul_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class L1(nn.Module):

    def __init__(self):
        super(L1, self).__init__()

    def forward(self, fake, real):
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.upsample(fake, size=(H, W), mode='bilinear')
        loss = torch.mean(torch.abs(10.0 * real - 10.0 * fake))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 10.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp5 = tmp2 - tmp4
    tmp6 = tl_math.abs(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
    tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
    tmp10 = 256.0
    tmp11 = tmp9 / tmp10
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg1_1, arg0_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class L1New(nn.Module):

    def __init__(self):
        super(L1New, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
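As with RMSE above, the 10.0 factor distributes out of the absolute value, so the fused kernel reduces to a scaled mean absolute error. A minimal sketch, not part of the dataset record, assuming a CUDA device and the L1New class above:

import torch

if torch.cuda.is_available():
    fake = torch.rand(4, 4, 4, 4, device='cuda')
    real = torch.rand(4, 4, 4, 4, device='cuda')
    fused = L1New()(fake, real)
    expected = 10.0 * torch.mean(torch.abs(real - fake))
    assert torch.allclose(fused, expected, atol=1e-5)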
d4l3k/crowds
L1
false
12240
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
FocalLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/uv/cuvpdztljgdb5yhe3d2yn6ygnf7xg4cnyqeufpxtspmbboqwcx6k.py
# Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, mean], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.mean]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   exp => exp
#   exp_1 => exp_1
#   exp_2 => exp_3
#   invprobs => abs_1, exp_2, full_default, log1p, minimum, neg_4, sub_3
#   log => log
#   loss => add_2
#   loss_1 => mul_4
#   max_val => clamp_min
#   mean => mean
#   mul => mul
#   mul_1 => mul_1
#   mul_2 => mul_2
#   mul_3 => mul_3
#   neg => neg
#   neg_1 => neg_1
#   neg_2 => neg_2
#   neg_3 => neg_3
#   sub => sub
#   sub_1 => sub_1
#   sub_2 => sub_2
# Graph fragment:
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1), kwargs = {})
#   %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_3, %sub_2), kwargs = {})
#   %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %mul_2), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_2,), kwargs = {})
#   %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
#   %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_4,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 4), kwargs = {})
#   %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
#   %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%neg, 0), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %clamp_min), kwargs = {})
#   %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%clamp_min,), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
#   %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_2, %clamp_min), kwargs = {})
#   %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, %exp_1), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %log), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, %add_2), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {})
triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0 = async_compile.triton('triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp2 = tl.load(in_ptr1 + (r0), None)
    tmp1 = -tmp0
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3
    tmp5 = 1.0
    tmp6 = tmp4 - tmp5
    tmp7 = tmp1 * tmp6
    tmp8 = 0.0
    tmp9 = triton_helpers.minimum(tmp8, tmp7)
    tmp10 = tl_math.abs(tmp7)
    tmp11 = -tmp10
    tmp12 = tl_math.exp(tmp11)
    tmp13 = libdevice.log1p(tmp12)
    tmp14 = tmp9 - tmp13
    tmp15 = 4.0
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp0 * tmp2
    tmp19 = tmp0 - tmp18
    tmp20 = triton_helpers.maximum(tmp1, tmp8)
    tmp21 = tmp19 + tmp20
    tmp22 = -tmp20
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp1 - tmp20
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp23 + tmp25
    tmp27 = tl_math.log(tmp26)
    tmp28 = tmp21 + tmp27
    tmp29 = tmp17 * tmp28
    tmp30 = tl.broadcast_to(tmp29, [RBLOCK])
    tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
    tmp33 = 256.0
    tmp34 = tmp32 / tmp33
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp34, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, mean], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F


class FocalLoss(nn.Module):

    def __init__(self, gamma):
        super().__init__()
        self.gamma = gamma

    def forward(self, input, target):
        if not target.size() == input.size():
            raise ValueError(
                'Target size ({}) must be the same as input size ({})'.
                format(target.size(), input.size()))
        max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + ((-max_val).exp() + (-
            input - max_val).exp()).log()
        invprobs = F.logsigmoid(-input * (target * 2 - 1))
        loss = (invprobs * self.gamma).exp() * loss
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'gamma': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0(
        in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr1 + r0, None)
    tmp1 = -tmp0
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3
    tmp5 = 1.0
    tmp6 = tmp4 - tmp5
    tmp7 = tmp1 * tmp6
    tmp8 = 0.0
    tmp9 = triton_helpers.minimum(tmp8, tmp7)
    tmp10 = tl_math.abs(tmp7)
    tmp11 = -tmp10
    tmp12 = tl_math.exp(tmp11)
    tmp13 = libdevice.log1p(tmp12)
    tmp14 = tmp9 - tmp13
    tmp15 = 4.0
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp0 * tmp2
    tmp19 = tmp0 - tmp18
    tmp20 = triton_helpers.maximum(tmp1, tmp8)
    tmp21 = tmp19 + tmp20
    tmp22 = -tmp20
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp1 - tmp20
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp23 + tmp25
    tmp27 = tl_math.log(tmp26)
    tmp28 = tmp21 + tmp27
    tmp29 = tmp17 * tmp28
    tmp30 = tl.broadcast_to(tmp29, [RBLOCK])
    tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
    tmp33 = 256.0
    tmp34 = tmp32 / tmp33
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp34, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0[
            grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class FocalLossNew(nn.Module):

    def __init__(self, gamma):
        super().__init__()
        self.gamma = gamma

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
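The long expression fused here is the numerically stable form of binary cross-entropy with logits, modulated elementwise by exp(gamma * logsigmoid(-x * (2t - 1))) with gamma = 4 baked in as a constant. A minimal sketch of the stable BCE identity, not part of the dataset record; x and t are illustrative names:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
t = torch.rand(4, 4, 4, 4)
max_val = (-x).clamp(min=0)
# Stable BCE-with-logits, written exactly as in the eager forward above.
manual = x - x * t + max_val + ((-max_val).exp() + (-x - max_val).exp()).log()
reference = F.binary_cross_entropy_with_logits(x, t, reduction='none')
assert torch.allclose(manual, reference, atol=1e-6)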
dainis-boumber/nlp-loss-functions
FocalLoss
false
12241
[ "Apache-2.0" ]
0
735d1e74bf9b9705a56cbb718b85448575efb5ee
https://github.com/dainis-boumber/nlp-loss-functions/tree/735d1e74bf9b9705a56cbb718b85448575efb5ee
ConvEncoder
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/kn/cknyjwkwufnzzf4ya3scui55ownkmt5cdh3hggzwsfe3ch5fshzm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16, 4096],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 12
    xnumel = 4096
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = (yindex // 3)
    tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/p6/cp6s4svoxgnzqeja6pzmabu3asmqyfoaympag6dtphmqncue7fik.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128, 16],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 96
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = (yindex // 3)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/wv/cwvtp6qflpb42kxrujmda5zselv7wvkz3fgp2tryo2ftsisaildr.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2048, 16],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 2048
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = (yindex // 32)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192, 16],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 8192
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = (yindex // 64)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/ih/cihu7ohoiwwrblocurozhw6ihpzbq4oc43mseo4n6wd7ronp74tw.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768, 16],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 32768
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = (yindex // 128)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/5c/c5cs3c3svcrznivu3zzny5tguj65spdtj2aitirh7fijbdkiv4cm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072, 16],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 131072
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = (yindex // 256)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/wm/cwmaacsffuosyjtapv3aqt4pkyqgob4jjpsl2qfwqddn7tfjjwjo.py
# Topologically Sorted Source Nodes: [net, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   net => convolution
#   relu => relu
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 123008
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/kt/cktxa4ungjp46xeccvlgy4nvcb4x4hunx3vtik5eswj7ak2rlada.py
# Topologically Sorted Source Nodes:
[net_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # net_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ky/ckylxdqvllfbkwuaq4pecelkx2s5gcvwuxkx6iawkomgwk6xhngy.py # Topologically Sorted Source Nodes: [net_2, relu_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # net_2 => convolution_2 # relu_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, 
DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/r7/cr7uq5ur2j4jwzfgtl7exszppe4m5worsfpyxi3aykaqmannjwdx.py # Topologically Sorted Source Nodes: [net_3, relu_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # net_3 => convolution_3 # relu_3 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ow/cowazwlu4tabjis56afgwzpg3aatyheci4ezqs3pmtxm2jiuqqhs.py # Topologically Sorted Source Nodes: [net_5, relu_4], Original ATen: [aten.mean, aten.relu] # Source node to ATen node mapping: # net_5 => mean # relu_4 => relu_4 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [2]), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mean,), kwargs = {}) triton_poi_fused_mean_relu_10 = async_compile.triton('triton_poi_fused_mean_relu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 / tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(in_out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (32, 3, 3, 3), 
(27, 9, 3, 1)) assert_size_stride(primals_3, (32, ), (1, )) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (256, ), (1, )) assert_size_stride(primals_10, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (512, ), (1, )) assert_size_stride(primals_12, (128, 512), (512, 1)) assert_size_stride(primals_13, (128, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_1 buf1 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_2, buf1, 96, 9, grid=grid(96, 9), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 2048, 9, grid=grid(2048, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, buf4, 32768, 9, grid=grid(32768, 9), stream=stream0) del primals_8 buf5 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_10, buf5, 131072, 9, grid=grid(131072, 9), stream=stream0) del primals_10 # Topologically Sorted Source Nodes: [net], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf0, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 31, 31), (30752, 1, 992, 32)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [net, relu], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf7, primals_3, 123008, grid=grid(123008), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [net_1], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 15, 15), (14400, 1, 960, 64)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [net_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_7.run(buf9, primals_5, 57600, grid=grid(57600), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [net_2], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 7, 7), (6272, 1, 896, 128)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [net_2, relu_2], Original ATen: [aten.convolution, aten.relu] 
triton_poi_fused_convolution_relu_8.run(buf11, primals_7, 25088, grid=grid(25088), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [net_3], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 3, 3), (2304, 1, 768, 256)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [net_3, relu_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf13, primals_9, 9216, grid=grid(9216), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [net_4], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, buf5, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 512, 1, 1), (512, 1, 512, 512)) buf15 = reinterpret_tensor(buf14, (4, 512), (512, 1), 0); del buf14 # reuse # Topologically Sorted Source Nodes: [net_5, relu_4], Original ATen: [aten.mean, aten.relu] triton_poi_fused_mean_relu_10.run(buf15, primals_11, 2048, grid=grid(2048), stream=stream0) del primals_11 buf16 = empty_strided_cuda((4, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf15, reinterpret_tensor(primals_12, (512, 128), (1, 512), 0), alpha=1, beta=1, out=buf16) del primals_13 return (buf16, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf13, buf15, primals_12, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class ConvEncoder(nn.Module): """ Simple convolutional encoder network. It consists of 5 convolutional layers, each downsampling the input by a factor of 2, and a final fully-connected layer projecting the output to c_dim dimensions. Args: c_dim (int): output dimension of latent embedding """ def __init__(self, c_dim=128): super().__init__() self.conv0 = nn.Conv2d(3, 32, 3, stride=2) self.conv1 = nn.Conv2d(32, 64, 3, stride=2) self.conv2 = nn.Conv2d(64, 128, 3, stride=2) self.conv3 = nn.Conv2d(128, 256, 3, stride=2) self.conv4 = nn.Conv2d(256, 512, 3, stride=2) self.fc_out = nn.Linear(512, c_dim) self.actvn = nn.ReLU() def forward(self, x): batch_size = x.size(0) net = self.conv0(x) net = self.conv1(self.actvn(net)) net = self.conv2(self.actvn(net)) net = self.conv3(self.actvn(net)) net = self.conv4(self.actvn(net)) net = net.view(batch_size, 512, -1).mean(2) out = self.fc_out(self.actvn(net)) return out def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
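A minimal smoke test for the eager ConvEncoder above (an illustrative sketch, not part of the original repository): feeding it the shape from get_inputs() yields a (batch, c_dim) embedding, since five stride-2, kernel-3, unpadded convolutions shrink a 64x64 input through 31 -> 15 -> 7 -> 3 -> 1 before the global mean and final linear layer.

import torch

# Hypothetical usage sketch; ConvEncoder is the class defined above.
encoder = ConvEncoder(c_dim=128)
x = torch.rand(4, 3, 64, 64)  # matches get_inputs()
with torch.no_grad():
    z = encoder(x)
assert z.shape == (4, 128)  # 64 -> 31 -> 15 -> 7 -> 3 -> 1, then mean + fc_out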
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mean_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 / tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(in_out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (32,), (1,)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (128, 512), (512, 1)) assert_size_stride(primals_13, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32) triton_poi_fused_1[grid(96, 9)](primals_2, buf1, 96, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch. 
float32) triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(32768, 9)](primals_8, buf4, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_5[grid(131072, 9)](primals_10, buf5, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = extern_kernels.convolution(buf0, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 31, 31), (30752, 1, 992, 32)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_6[grid(123008)](buf7, primals_3, 123008, XBLOCK=512, num_warps=8, num_stages=1) del primals_3 buf8 = extern_kernels.convolution(buf7, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 15, 15), (14400, 1, 960, 64)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_7[grid(57600)](buf9, primals_5, 57600, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf10 = extern_kernels.convolution(buf9, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 7, 7), (6272, 1, 896, 128)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_8[grid(25088)](buf11, primals_7, 25088, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf12 = extern_kernels.convolution(buf11, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 3, 3), (2304, 1, 768, 256)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_9[grid(9216)](buf13, primals_9, 9216, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf14 = extern_kernels.convolution(buf13, buf5, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 512, 1, 1), (512, 1, 512, 512)) buf15 = reinterpret_tensor(buf14, (4, 512), (512, 1), 0) del buf14 triton_poi_fused_mean_relu_10[grid(2048)](buf15, primals_11, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf16 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.addmm(primals_13, buf15, reinterpret_tensor( primals_12, (512, 128), (1, 512), 0), alpha=1, beta=1, out=buf16) del primals_13 return (buf16, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf13, buf15, primals_12) class ConvEncoderNew(nn.Module): """ Simple convolutional encoder network. It consists of 5 convolutional layers, each downsampling the input by a factor of 2, and a final fully-connected layer projecting the output to c_dim dimensions.
Args: c_dim (int): output dimension of latent embedding """ def __init__(self, c_dim=128): super().__init__() self.conv0 = nn.Conv2d(3, 32, 3, stride=2) self.conv1 = nn.Conv2d(32, 64, 3, stride=2) self.conv2 = nn.Conv2d(64, 128, 3, stride=2) self.conv3 = nn.Conv2d(128, 256, 3, stride=2) self.conv4 = nn.Conv2d(256, 512, 3, stride=2) self.fc_out = nn.Linear(512, c_dim) self.actvn = nn.ReLU() def forward(self, input_0): primals_2 = self.conv0.weight primals_3 = self.conv0.bias primals_4 = self.conv1.weight primals_5 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_8 = self.conv3.weight primals_9 = self.conv3.bias primals_10 = self.conv4.weight primals_11 = self.conv4.bias primals_12 = self.fc_out.weight primals_13 = self.fc_out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
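ConvEncoderNew keeps the same submodule names and parameter shapes as the eager ConvEncoder, so with shared weights the two should agree numerically. A hedged comparison sketch, assuming a CUDA device is available (the generated call() pins all buffers to cuda:0 and the input to shape (4, 3, 64, 64)):

import torch

# Illustrative sketch only: check the Inductor-generated wrapper against eager mode.
eager = ConvEncoder(c_dim=128).cuda().eval()
compiled = ConvEncoderNew(c_dim=128).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical submodule names, so weights transfer

x = torch.rand(4, 3, 64, 64, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)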
crysoberil/ObjectReconstruction_ONetBased
ConvEncoder
false
12242
[ "MIT" ]
0
7c15ea8a64ee3647c86b57b16f0c85bd51ccdd47
https://github.com/crysoberil/ObjectReconstruction_ONetBased/tree/7c15ea8a64ee3647c86b57b16f0c85bd51ccdd47
L1_log
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/mc/cmc4ozgvwgocsucfwhs2hewom5iy2ggs7j3nad7lqhtusosy5qfw.py # Topologically Sorted Source Nodes: [log, log_1, sub, abs_1, loss], Original ATen: [aten.log, aten.sub, aten.abs, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # log => log # log_1 => log_1 # loss => mean # sub => sub # Graph fragment: # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) triton_per_fused_abs_log_mean_sub_0 = async_compile.triton('triton_per_fused_abs_log_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_log_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_log_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = 256.0 tmp10 = tmp8 / tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [log, log_1, sub, abs_1, loss], Original ATen: [aten.log, aten.sub, aten.abs, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_log_mean_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class L1_log(nn.Module): def __init__(self): super(L1_log, self).__init__() def forward(self, fake, real): if not fake.shape == real.shape: _, _, H, W = real.shape fake = F.upsample(fake, size=(H, W), mode='bilinear') loss = torch.mean(torch.abs(torch.log(real) - torch.log(fake))) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
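The loss above is just the mean absolute difference of logs, so it can be cross-checked against a hand-rolled computation; both tensors must be strictly positive for the logarithms to stay finite (torch.rand samples in [0, 1), so clamping away exact zeros is a cheap safeguard). An illustrative sketch:

import torch

criterion = L1_log()
fake = torch.rand(4, 4, 4, 4).clamp_min(1e-6)  # keep log() finite
real = torch.rand(4, 4, 4, 4).clamp_min(1e-6)
loss = criterion(fake, real)
expected = (real.log() - fake.log()).abs().mean()
torch.testing.assert_close(loss, expected)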
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_log_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = 256.0 tmp10 = tmp8 / tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_log_mean_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L1_logNew(nn.Module): def __init__(self): super(L1_logNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
d4l3k/crowds
L1_log
false
12243
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
NormalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/p6/cp6wyjxdroeuryhnrpcw4isdrjj735erxbkt7scmk3itidka32nt.py # Topologically Sorted Source Nodes: [pow_1, sum_1, fake_norm, pow_2, sum_2, real_norm, mul, truediv, mean, sub], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.mul, aten.div, aten.mean, aten.rsub] # Source node to ATen node mapping: # fake_norm => sqrt # mean => mean # mul => mul # pow_1 => pow_1 # pow_2 => pow_2 # real_norm => sqrt_1 # sub => sub # sum_1 => sum_1 # sum_2 => sum_2 # truediv => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1]), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [-1]), kwargs = {}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_2,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %sqrt_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%squeeze_1, %mul), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mean), kwargs = {}) triton_per_fused_div_mean_mul_pow_rsub_sqrt_sum_0 = async_compile.triton('triton_per_fused_div_mean_mul_pow_rsub_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_mul_pow_rsub_sqrt_sum_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mean_mul_pow_rsub_sqrt_sum_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_out_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp14 = tmp13 * tmp13 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tmp12 * tmp24 tmp26 = tmp0 / tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = 16.0 tmp31 = tmp29 / tmp30 tmp32 = 1.0 tmp33 = tmp32 - tmp31 tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp33, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 1), (4, 1, 1), 0), out=buf0) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [pow_1, sum_1, fake_norm, pow_2, sum_2, real_norm, mul, truediv, mean, sub], 
Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.mul, aten.div, aten.mean, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_div_mean_mul_pow_rsub_sqrt_sum_0.run(buf1, buf3, arg0_1, arg1_1, 1, 16, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del buf1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class NormalLoss(nn.Module): def __init__(self): super(NormalLoss, self).__init__() def forward(self, grad_fake, grad_real): prod = (grad_fake[:, :, None, :] @ grad_real[:, :, :, None]).squeeze(-1 ).squeeze(-1) fake_norm = torch.sqrt(torch.sum(grad_fake ** 2, dim=-1)) real_norm = torch.sqrt(torch.sum(grad_real ** 2, dim=-1)) return 1 - torch.mean(prod / (fake_norm * real_norm)) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
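Because prod is the batched dot product over the last dimension, prod / (fake_norm * real_norm) is exactly the cosine similarity of the two gradient vectors, and the loss is one minus its mean. A cross-check sketch against torch.nn.functional.cosine_similarity (eps=0 mirrors the unguarded division here; random inputs make zero norms vanishingly unlikely):

import torch
import torch.nn.functional as F

criterion = NormalLoss()
grad_fake = torch.rand(4, 4, 4)
grad_real = torch.rand(4, 4, 4)
loss = criterion(grad_fake, grad_real)
expected = 1 - F.cosine_similarity(grad_fake, grad_real, dim=-1, eps=0).mean()
torch.testing.assert_close(loss, expected, rtol=1e-4, atol=1e-6)  # loose tolerance: different reduction order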
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mean_mul_pow_rsub_sqrt_sum_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_out_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp14 = tmp13 * tmp13 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tmp12 * tmp24 tmp26 = tmp0 / tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = 16.0 tmp31 = tmp29 / tmp30 tmp32 = 1.0 tmp33 = tmp32 - tmp31 tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp33, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 1), (4, 1, 1), 0), out=buf0) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 get_raw_stream(0) triton_per_fused_div_mean_mul_pow_rsub_sqrt_sum_0[grid(1)](buf1, buf3, arg0_1, arg1_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf1 return buf3, class NormalLossNew(nn.Module): def __init__(self): super(NormalLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
d4l3k/crowds
NormalLoss
false
12244
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/vw/cvwqcezjgw4hcr2zvuks3igdsd4iniq3ke5z3ruhzsvxj35eqh2j.py # Topologically Sorted Source Nodes: [attn_1, x, x_1, max_1, sub, e_x, e_x_1, sum_1], Original ATen: [aten.div, aten.clamp, aten.mul, aten.max, aten.sub, aten.exp, aten.sum] # Source node to ATen node mapping: # attn_1 => div # e_x => exp # e_x_1 => mul_1 # max_1 => max_1 # sub => sub # sum_1 => sum_1 # x => clamp_max, clamp_min # x_1 => mul # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, 4), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, -15.0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 15.0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, %arg2_1), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 2, True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %arg2_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2], True), kwargs = {}) triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0 = async_compile.triton('triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = -15.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 15.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp8 = tmp6 * tmp7 tmp10 = tmp9 * tmp1 tmp11 = triton_helpers.maximum(tmp10, tmp3) tmp12 = triton_helpers.minimum(tmp11, tmp5) tmp14 = tmp12 * tmp13 tmp15 = triton_helpers.maximum(tmp8, tmp14) tmp17 = tmp16 * tmp1 tmp18 = triton_helpers.maximum(tmp17, tmp3) tmp19 = triton_helpers.minimum(tmp18, tmp5) tmp21 = tmp19 * tmp20 tmp22 = triton_helpers.maximum(tmp15, tmp21) tmp24 = tmp23 * tmp1 tmp25 = triton_helpers.maximum(tmp24, tmp3) tmp26 = triton_helpers.minimum(tmp25, tmp5) tmp28 = tmp26 * tmp27 tmp29 = triton_helpers.maximum(tmp22, tmp28) tmp30 = tmp8 - tmp29 tmp31 = tl_math.exp(tmp30) tmp32 = tmp31 * tmp7 tmp33 = tmp14 - tmp29 tmp34 = tl_math.exp(tmp33) tmp35 = tmp34 * tmp13 tmp36 = tmp32 + tmp35 tmp37 = tmp21 - tmp29 tmp38 = tl_math.exp(tmp37) tmp39 = tmp38 * tmp20 tmp40 = tmp36 + tmp39 tmp41 = tmp28 - tmp29 tmp42 = tl_math.exp(tmp41) tmp43 = tmp42 * tmp27 tmp44 = tmp40 + tmp43 tl.store(out_ptr0 + (x0), tmp29, xmask) tl.store(out_ptr1 + (x0), tmp44, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/73/c73ulpsraxahj2gseyz7pjma7y7eg6gvtqpca55bh6jaui66cdc5.py # Topologically Sorted Source Nodes: [attn_1, x, x_1, max_1, sub, e_x, e_x_1, add, softmax], Original ATen: [aten.div, aten.clamp, aten.mul, aten.max, aten.sub, aten.exp, aten.add] # Source node to ATen node mapping: # add => add # attn_1 => div # e_x => exp # e_x_1 => mul_1 # max_1 => max_1 # softmax => div_1 # sub => sub # x => clamp_max, clamp_min # x_1 => mul # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, 4), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, -15.0), kwargs = {}) # %clamp_max : [num_users=1] = 
call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 15.0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, %arg2_1), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 2, True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %arg2_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add), kwargs = {}) triton_poi_fused_add_clamp_div_exp_max_mul_sub_1 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_mul_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_mul_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_mul_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp7 = tl.load(in_ptr0 + (x2), xmask) tmp9 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = -15.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 15.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp8 = tmp6 * tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tmp11 * tmp7 tmp14 = 1e-06 tmp15 = tmp13 + tmp14 tmp16 = tmp12 / tmp15 tl.store(in_out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [attn_1, x, x_1, max_1, sub, e_x, e_x_1, sum_1], Original ATen: [aten.div, aten.clamp, aten.mul, aten.max, aten.sub, aten.exp, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0.run(buf0, arg2_1, buf1, buf2, 16, grid=grid(16), stream=stream0) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attn_1, x, x_1, max_1, sub, e_x, e_x_1, add, softmax], Original ATen: [aten.div, aten.clamp, aten.mul, aten.max, aten.sub, aten.exp, aten.add] triton_poi_fused_add_clamp_div_exp_max_mul_sub_1.run(buf3, arg2_1, buf1, buf2, 64, grid=grid(64), stream=stream0) del arg2_1 del buf1 del buf2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf3, arg3_1, out=buf4) del arg3_1 return (buf4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch def masked_softmax(x, m=None, dim=-1): """ Softmax with mask (optional) """ x = torch.clamp(x, min=-15.0, max=15.0) if m is not None: m = m.float() x = x * m e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0]) if m is not None: e_x = e_x * m softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06) return softmax class ScaledDotProductAttention(torch.nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, dropout=0.1): super().__init__() self.temperature = temperature self.dropout = torch.nn.Dropout(dropout) def forward(self, q, k, v, mask): attn = torch.bmm(q, k.transpose(1, 2)) attn = attn / self.temperature attn = masked_softmax(attn, mask, 2) attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'temperature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp27 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = -15.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 15.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp8 = tmp6 * tmp7 tmp10 = tmp9 * tmp1 tmp11 = triton_helpers.maximum(tmp10, tmp3) tmp12 = triton_helpers.minimum(tmp11, tmp5) tmp14 = tmp12 * tmp13 tmp15 = triton_helpers.maximum(tmp8, tmp14) tmp17 = tmp16 * tmp1 tmp18 = triton_helpers.maximum(tmp17, tmp3) tmp19 = triton_helpers.minimum(tmp18, tmp5) tmp21 = tmp19 * tmp20 tmp22 = triton_helpers.maximum(tmp15, tmp21) tmp24 = tmp23 * tmp1 tmp25 = triton_helpers.maximum(tmp24, tmp3) tmp26 = triton_helpers.minimum(tmp25, tmp5) tmp28 = tmp26 * tmp27 tmp29 = triton_helpers.maximum(tmp22, tmp28) tmp30 = tmp8 - tmp29 tmp31 = tl_math.exp(tmp30) tmp32 = tmp31 * tmp7 tmp33 = tmp14 - tmp29 tmp34 = tl_math.exp(tmp33) tmp35 = tmp34 * tmp13 tmp36 = tmp32 + tmp35 tmp37 = tmp21 - tmp29 tmp38 = tl_math.exp(tmp37) tmp39 = tmp38 * tmp20 tmp40 = tmp36 + tmp39 tmp41 = tmp28 - tmp29 tmp42 = tl_math.exp(tmp41) tmp43 = tmp42 * tmp27 tmp44 = tmp40 + tmp43 tl.store(out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr1 + x0, tmp44, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_mul_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp7 = tl.load(in_ptr0 + x2, xmask) tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = -15.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 15.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp8 = tmp6 * tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tmp11 * tmp7 tmp14 = 1e-06 tmp15 = tmp13 + tmp14 tmp16 = tmp12 / tmp15 tl.store(in_out_ptr0 + x2, tmp16, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 
4, 1)) assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0[grid(16)](buf0, arg2_1, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = buf0 del buf0 triton_poi_fused_add_clamp_div_exp_max_mul_sub_1[grid(64)](buf3, arg2_1, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg2_1 del buf1 del buf2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf3, arg3_1, out=buf4) del arg3_1 return buf4, buf3 def masked_softmax(x, m=None, dim=-1): """ Softmax with mask (optional) """ x = torch.clamp(x, min=-15.0, max=15.0) if m is not None: m = m.float() x = x * m e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0]) if m is not None: e_x = e_x * m softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06) return softmax class ScaledDotProductAttentionNew(torch.nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, dropout=0.1): super().__init__() self.temperature = temperature self.dropout = torch.nn.Dropout(dropout) def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1]
daiki-kimura/commonsense-rl
ScaledDotProductAttention
false
12,245
[ "Apache-2.0" ]
0
5513926957b6501ce9cfa46f77f8f2c1c4892fa5
https://github.com/daiki-kimura/commonsense-rl/tree/5513926957b6501ce9cfa46f77f8f2c1c4892fa5
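A minimal eager-mode sanity check for the masked_softmax helper defined in the record above (a sketch, not part of the record; it assumes masked_softmax from this record is in scope and runs on CPU, since no compiled kernels are involved). With an all-ones mask, the result should agree with torch.softmax over the same clamped logits up to the 1e-06 smoothing term in the denominator:

import torch

x = torch.randn(2, 3, 4)
mask = torch.ones_like(x)  # all-ones mask: the masking multiplications become no-ops
out = masked_softmax(x, mask, dim=2)
# Reference: plain softmax over the same clamped logits.
ref = torch.softmax(torch.clamp(x, min=-15.0, max=15.0), dim=2)
# The only remaining difference is the +1e-06 in masked_softmax's denominator.
print(torch.allclose(out, ref, atol=1e-05))  # True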
SoftWingLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/un/cunwpec7dtp6lckksegntrqkwzophx7kdx4optoqpzwrrovam74k.py # Topologically Sorted Source Nodes: [sub, delta, lt, truediv, add, log, mul, add_1, losses, sum_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.div, aten.add, aten.log, aten.mul, aten.where, aten.sum] # Source node to ATen node mapping: # add => add # add_1 => add_1 # delta => abs_1 # log => log # losses => where # lt => lt # mul => mul # sub => sub # sum_1 => sum_1 # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 2.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1.0), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, 20.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, -30.188758248682007), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %abs_1, %add_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where, [1, 2]), kwargs = {}) triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0 = async_compile.triton('triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = (xindex // 4) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 2.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp4 tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tl_math.log(tmp8) tmp10 = 20.0 tmp11 = tmp9 * tmp10 tmp12 = -30.188758248682007 tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp5, tmp3, tmp13) tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tl.store(out_ptr0 + (x3), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/yi/cyivqxc32riwmi4nsqtijcqefjxyivwafk4qjnscryb6cwpfnqim.py # Topologically Sorted Source Nodes: [loss, mul_1], Original ATen: [aten.mean, aten.mul] # Source node to ATen node mapping: # loss => mean # mul_1 => mul_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sum_1, [0]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_poi_fused_mean_mul_1 = async_compile.triton('triton_poi_fused_mean_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, delta, lt, truediv, add, log, mul, add_1, losses, sum_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.div, aten.add, aten.log, aten.mul, aten.where, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0.run(arg0_1, arg1_1, buf0, 16, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [loss, mul_1], Original ATen: [aten.mean, aten.mul] triton_poi_fused_mean_mul_1.run(buf0, buf1, 4, grid=grid(4), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn class SoftWingLoss(nn.Module): """Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face Alignment' Lin et al. TIP'2021. loss = 1. |x| , if |x| < omega1 2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1 Args: omega1 (float): The first threshold. omega2 (float): The second threshold. epsilon (float): Also referred to as curvature. use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, omega1=2.0, omega2=20.0, epsilon=0.5, use_target_weight=False, loss_weight=1.0): super().__init__() self.omega1 = omega1 self.omega2 = omega2 self.epsilon = epsilon self.use_target_weight = use_target_weight self.loss_weight = loss_weight self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 / self.epsilon) def criterion(self, pred, target): """Criterion of wingloss. Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: pred (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. """ delta = (target - pred).abs() losses = torch.where(delta < self.omega1, delta, self.omega2 * torch.log(1.0 + delta / self.epsilon) + self.B) return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) def forward(self, output, target, target_weight=None): """Forward function. Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: output (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. target_weight (torch.Tensor[N, K, D]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 2.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp4 tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tl_math.log(tmp8) tmp10 = 20.0 tmp11 = tmp9 * tmp10 tmp12 = -30.188758248682007 tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp5, tmp3, tmp13) tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0[grid(16)]( arg0_1, arg1_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mean_mul_1[grid(4)](buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf0 return buf1, class SoftWingLossNew(nn.Module): """Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face Alignment' Lin et al. TIP'2021. loss = 1. |x| , if |x| < omega1 2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1 Args: omega1 (float): The first threshold. omega2 (float): The second threshold. epsilon (float): Also referred to as curvature. use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, omega1=2.0, omega2=20.0, epsilon=0.5, use_target_weight=False, loss_weight=1.0): super().__init__() self.omega1 = omega1 self.omega2 = omega2 self.epsilon = epsilon self.use_target_weight = use_target_weight self.loss_weight = loss_weight self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 / self.epsilon) def criterion(self, pred, target): """Criterion of wingloss. 
Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: pred (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. """ delta = (target - pred).abs() losses = torch.where(delta < self.omega1, delta, self.omega2 * torch.log(1.0 + delta / self.epsilon) + self.B) return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chenxinfeng4/mmpose
SoftWingLoss
false
12,246
[ "Apache-2.0" ]
0
b0aac4178c1f3d679d2a007e1d9c6c567fc2607d
https://github.com/chenxinfeng4/mmpose/tree/b0aac4178c1f3d679d2a007e1d9c6c567fc2607d
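A quick numeric cross-check of the fused kernel's constants (a sketch added here, using only the standard library): the hard-coded -30.188758248682007 in triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0 is the continuity offset B computed in SoftWingLoss.__init__ with the defaults omega1=2.0, omega2=20.0, epsilon=0.5, and the kernel's tmp6 = tmp3 * tmp4 multiplies delta by 2.0, the constant-folded form of delta / epsilon for epsilon = 0.5. B is chosen so the two branches of the loss meet at |x| = omega1:

import math

omega1, omega2, epsilon = 2.0, 20.0, 0.5
B = omega1 - omega2 * math.log(1.0 + omega1 / epsilon)
print(B)  # -30.188758248682007, the kernel's tmp12

# Continuity at the threshold: both branches evaluate to omega1 there.
linear_branch = omega1
log_branch = omega2 * math.log(1.0 + omega1 / epsilon) + B
print(linear_branch, log_branch)  # 2.0 2.0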
CNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/nx/cnxws3q66oexgt7cc7qipch6axzrogfuqs62jltv7afv7kdhibh7.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 288000 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 20 x0 = xindex % 3600 x4 = (xindex // 3600) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ar/carimle5mqx7xfbvwnn44w6mirx7wgqcjxiw32pqiiybbxmwtswv.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets, getitem_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu, [3, 3], [3, 3], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = (xindex // 20) % 20 x2 = (xindex // 400) x5 = xindex x4 = (xindex // 8000) x6 = xindex % 8000 tmp0 = tl.load(in_ptr0 + ((3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (60 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (61 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (62 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp11 = 
tl.load(in_ptr0 + (120 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (121 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (122 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + (x5), tmp16, xmask) tl.store(out_ptr1 + (x6 + (8064*x4)), tmp41, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/tt/cttmrtyatnz7lcto4abctw6oe6ah4qsnaagpplrc34enobsw3qnq.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, 
XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xk/cxka33fsjgumiuk2joc3cs43ddlpebgh572xevw74a3o7munm2c6.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_per_fused__log_softmax_3 = async_compile.triton('triton_per_fused__log_softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 25 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = 
tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (20, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 1280), (1280, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (10, 64), (64, 1)) assert_size_stride(primals_7, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 20, 60, 60), (72320, 3616, 60, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 288000, grid=grid(288000), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 20, 20, 20), (8000, 400, 20, 1), torch.float32) buf3 = empty_strided_cuda((4, 20, 20, 20), (8064, 400, 20, 1), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 32000, grid=grid(32000), stream=stream0) buf4 = empty_strided_cuda((25, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (25, 1280), (1280, 1), 0), reinterpret_tensor(primals_4, (1280, 64), (1, 1280), 0), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf5, primals_5, 1600, grid=grid(1600), stream=stream0) del primals_5 buf6 = empty_strided_cuda((25, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf6) del primals_7 buf9 = empty_strided_cuda((25, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_per_fused__log_softmax_3.run(buf6, buf9, 25, 10, grid=grid(25), stream=stream0) del buf6 return (buf9, primals_1, primals_3, buf1, buf3, reinterpret_tensor(buf2, (25, 1280), (1280, 1), 0), buf5, buf9, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((20, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 1280), (1280, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((10, 64), (64, 1), 
device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class CNN(nn.Module): """ Convolutional Neural Network. """ def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1) self.fc1 = nn.Linear(8 * 8 * 20, 64) self.fc2 = nn.Linear(64, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 3, 3) x = x.view(-1, 8 * 8 * 20) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=-1) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 288000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 20 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = xindex // 20 % 20 x2 = xindex // 400 x5 = xindex x4 = xindex // 8000 x6 = xindex % 8000 tmp0 = tl.load(in_ptr0 + (3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (60 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (61 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (62 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (120 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (121 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (122 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + x5, tmp16, xmask) tl.store(out_ptr1 + (x6 + 8064 
* x4), tmp41, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 25 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 1280), (1280, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (10, 64), (64, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 20, 60, 60), (72320, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(288000)](buf0, primals_2, buf1, 288000, XBLOCK=512, num_warps=8, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 20, 20, 20), (8000, 400, 20, 1), torch.float32) buf3 = empty_strided_cuda((4, 20, 20, 20), (8064, 400, 20, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(32000)](buf1, buf2, buf3, 32000, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((25, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (25, 1280), (1280, 1), 0 ), reinterpret_tensor(primals_4, (1280, 64), (1, 1280), 0), out =buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(1600)](buf5, primals_5, 1600, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((25, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf6) del primals_7 buf9 = empty_strided_cuda((25, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_3[grid(25)](buf6, buf9, 25, 10, XBLOCK=8, num_warps=2, num_stages=1) del buf6 return buf9, primals_1, primals_3, buf1, buf3, reinterpret_tensor(buf2, (25, 1280), (1280, 1), 0), buf5, buf9, primals_6, primals_4 class CNNNew(nn.Module): """ Convolutional Neural Network. 
""" def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1) self.fc1 = nn.Linear(8 * 8 * 20, 64) self.fc2 = nn.Linear(64, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
danielrjiang/Ax
CNN
false
12,247
[ "MIT" ]
0
43014b28683b3037b5c7307869cb9b75ca31ffb6
https://github.com/danielrjiang/Ax/tree/43014b28683b3037b5c7307869cb9b75ca31ffb6
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/in/cin6bebkpfuweyzzgtljy26zh2yhrs7rpusw2jnlmszgn4jg27lx.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone, aten.transpose] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) # %permute_8 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%clone, [0, 2, 1]), kwargs = {}) triton_poi_fused_clone_transpose_0 = async_compile.triton('triton_poi_fused_clone_transpose_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_transpose_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = 
tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + (4*x1) + (16*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_weights => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py # Topologically Sorted Source Nodes: 
[attention_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_weights => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/wd/cwdechbtujfh3khensgj7m65ycmclcmrggkwsxpoa3is2n47bah4.py # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat] # Source node to ATen node mapping: # combined => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %view_1], 2), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, 
cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/oj/coje6ro7aly3k4hwvxmkcoxi6nwxzpg23gh2inoddo4imx7svkus.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # output_1 => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) triton_poi_fused_tanh_4 = async_compile.triton('triton_poi_fused_tanh_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_4(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6h/c6hrz6skegptbwysr7x2cgs54meeqjj4yiqej4kdtkfcybd253z5.py # Topologically Sorted Source Nodes: [attention_weights_2], Original ATen: [aten.mean] # Source node to ATen node mapping: # attention_weights_2 => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_3, [1]), kwargs = {}) triton_poi_fused_mean_5 = async_compile.triton('triton_poi_fused_mean_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [query_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) buf10 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone, aten.transpose] stream0 = get_raw_stream(0) 
triton_poi_fused_clone_transpose_0.run(primals_2, buf1, buf10, 16, 4, grid=grid(16, 4), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous, attention_scores], Original ATen: [aten.clone, aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf1, out=buf2) buf3 = reinterpret_tensor(buf1, (16, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [mix], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf5) buf6 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(buf5, buf0, buf6, 128, grid=grid(128), stream=stream0) del buf0 buf7 = reinterpret_tensor(buf5, (16, 4), (4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf6, (16, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_4.run(buf8, 64, grid=grid(64), stream=stream0) buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_weights_2], Original ATen: [aten.mean] triton_poi_fused_mean_5.run(buf4, buf9, 16, grid=grid(16), stream=stream0) del buf4 return (buf8, buf9, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), buf2, reinterpret_tensor(buf6, (16, 8), (8, 1), 0), buf8, primals_4, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
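# An eager-mode reference for the kernel pair above (a sketch, not part of
# the generated file): triton_poi_fused__softmax_1 stores exp(x - rowwise
# max) and triton_poi_fused__softmax_2 divides by the rowwise sum, which
# together form the standard numerically stable softmax.
def _softmax_reference(scores):
    shifted = scores - scores.max(dim=-1, keepdim=True).values  # kernel 1
    numer = shifted.exp()                                       # kernel 1
    return numer / numer.sum(dim=-1, keepdim=True)              # kernel 2
# torch.allclose(_softmax_reference(t), torch.softmax(t, dim=-1)) holds for
# any floating-point tensor t.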
import torch
import torch.nn as nn


class Attention(nn.Module):
    """ Applies attention mechanism on the `context` using the `query`.

    **Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
    their `License
    <https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.

    Args:
        query_dim (int): Dimensionality of the query.
        context_dim (int): Dimensionality of the context.
        attention_type (str, optional): How to compute the attention score:

            * dot: :math:`score(H_j,q) = H_j^T q`
            * general: :math:`score(H_j, q) = H_j^T W_a q`

    Example:

         >>> attention = Attention(256, 256)
         >>> query = torch.randn(5, 1, 256)
         >>> context = torch.randn(5, 5, 256)
         >>> output, weights = attention(query, context)
         >>> output.size()
         torch.Size([5, 1, 256])
         >>> weights.size()
         torch.Size([5, 5])
    """

    def __init__(self, query_dim, context_dim, attention_type='general'):
        super(Attention, self).__init__()
        if attention_type not in ['dot', 'general']:
            raise ValueError('Invalid attention type selected.')
        self.attention_type = attention_type
        if self.attention_type == 'general':
            self.linear_in = nn.Linear(query_dim, query_dim, bias=False)
        if query_dim != context_dim:
            self.linear_proj = nn.Linear(query_dim, context_dim, bias=False)
        self.linear_out = nn.Linear(context_dim * 2, context_dim, bias=False)
        self.softmax = nn.Softmax(dim=-1)
        self.tanh = nn.Tanh()

    def forward(self, query, context):
        """
        Args:
            query (:class:`torch.FloatTensor` [batch size, output length, dimensions]):
                Sequence of queries to query the context.
            context (:class:`torch.FloatTensor` [batch size, query length, dimensions]):
                Data over which to apply the attention mechanism.

        Returns:
            :class:`tuple` with `output` and `weights`:
            * **output** (:class:`torch.FloatTensor` [batch size, output length,
              dimensions]): Tensor containing the attended features.
            * **weights** (:class:`torch.FloatTensor` [batch size, query length]):
              Tensor containing attention weights, averaged over the output length.
        """
        batch_size, output_len, query_dim = query.size()
        batch_size, query_len, context_dim = context.size()
        if self.attention_type == 'general':
            query = query.reshape(batch_size * output_len, query_dim)
            query = self.linear_in(query)
            query = query.reshape(batch_size, output_len, query_dim)
        if query_dim != context_dim:
            query = self.linear_proj(query)
        attention_scores = torch.bmm(query, context.transpose(1, 2).contiguous())
        attention_scores = attention_scores.view(batch_size * output_len,
            query_len)
        attention_weights = self.softmax(attention_scores)
        attention_weights = attention_weights.view(batch_size, output_len,
            query_len)
        mix = torch.bmm(attention_weights, context)
        combined = torch.cat((mix, query), dim=2)
        combined = combined.view(batch_size * output_len, 2 * context_dim)
        output = self.linear_out(combined).view(batch_size, output_len,
            context_dim)
        output = self.tanh(output)
        attention_weights = attention_weights.mean(dim=1)
        return output, attention_weights


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'query_dim': 4, 'context_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + 4 * x1 + 16 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_mean_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) buf10 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_clone_transpose_0[grid(16, 4)](primals_2, buf1, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf1, out=buf2) buf3 = reinterpret_tensor(buf1, (16, 4), (4, 1), 0) del buf1 triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf5) buf6 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_3[grid(128)](buf5, buf0, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf7 = reinterpret_tensor(buf5, (16, 4), (4, 1), 0) del buf5 extern_kernels.mm(reinterpret_tensor(buf6, (16, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0) del buf7 triton_poi_fused_tanh_4[grid(64)](buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mean_5[grid(16)](buf4, buf9, 16, XBLOCK=16, num_warps=1, num_stages=1) del 
buf4
    return buf8, buf9, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
        ), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0
        ), buf2, reinterpret_tensor(buf6, (16, 8), (8, 1), 0
        ), buf8, primals_4, buf10


class AttentionNew(nn.Module):
    """ Applies attention mechanism on the `context` using the `query`.

    **Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
    their `License
    <https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.

    Args:
        query_dim (int): Dimensionality of the query.
        context_dim (int): Dimensionality of the context.
        attention_type (str, optional): How to compute the attention score:

            * dot: :math:`score(H_j,q) = H_j^T q`
            * general: :math:`score(H_j, q) = H_j^T W_a q`

    Example:

         >>> attention = Attention(256, 256)
         >>> query = torch.randn(5, 1, 256)
         >>> context = torch.randn(5, 5, 256)
         >>> output, weights = attention(query, context)
         >>> output.size()
         torch.Size([5, 1, 256])
         >>> weights.size()
         torch.Size([5, 5])
    """

    def __init__(self, query_dim, context_dim, attention_type='general'):
        super(AttentionNew, self).__init__()
        if attention_type not in ['dot', 'general']:
            raise ValueError('Invalid attention type selected.')
        self.attention_type = attention_type
        if self.attention_type == 'general':
            self.linear_in = nn.Linear(query_dim, query_dim, bias=False)
        if query_dim != context_dim:
            self.linear_proj = nn.Linear(query_dim, context_dim, bias=False)
        self.linear_out = nn.Linear(context_dim * 2, context_dim, bias=False)
        self.softmax = nn.Softmax(dim=-1)
        self.tanh = nn.Tanh()

    def forward(self, input_0, input_1):
        primals_3 = self.linear_in.weight
        primals_4 = self.linear_out.weight
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0], output[1]
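# A usage sketch for the compiled wrapper (hypothetical, not part of the
# original repo), using the shapes asserted in call(). The constructor takes
# both query_dim and context_dim, and the returned weights come back already
# averaged over the output length, i.e. as (batch, query_len).
if torch.cuda.is_available():
    _attn = AttentionNew(query_dim=4, context_dim=4).cuda()
    _query = torch.rand(4, 4, 4, device='cuda')    # (batch, output_len, query_dim)
    _context = torch.rand(4, 4, 4, device='cuda')  # (batch, query_len, context_dim)
    _out, _weights = _attn(_query, _context)
    assert _out.shape == (4, 4, 4) and _weights.shape == (4, 4)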
daiki-kimura/commonsense-rl
Attention
false
12,248
[ "Apache-2.0" ]
0
5513926957b6501ce9cfa46f77f8f2c1c4892fa5
https://github.com/daiki-kimura/commonsense-rl/tree/5513926957b6501ce9cfa46f77f8f2c1c4892fa5
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/cg/ccgtkhvdp3a6glg6b5mu3lxdeneo4kivmkphkbwhh3vjc2sj55z6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_4], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1616 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 404 x1 = (xindex // 404) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 400, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((400*x1) + x0), tmp4 
& xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 404, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-400) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/h6/ch6kkdnmda5jlqknka6bgvagc6blocikw7guocvqctfwov7ziyw7.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_6), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mb/cmbi6bksf7zib3bnzcishhfmrsdkxmfjipgzj6yptuux5vnh4rnq.py # Topologically Sorted Source Nodes: [xs], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # xs => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = 
(%add_tensor_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (300, 404), (404, 1)) assert_size_stride(primals_6, (300, ), (1, )) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 404), (404, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_2, primals_4, buf1, 1616, grid=grid(1616), stream=stream0) del primals_4 buf2 = empty_strided_cuda((4, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (404, 300), (1, 404), 0), out=buf2) buf3 = buf2; del 
buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_6, 1200, grid=grid(1200), stream=stream0) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 400), (400, 1), torch.bool) # Topologically Sorted Source Nodes: [xs], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf0, primals_2, buf6, 1600, grid=grid(1600), stream=stream0) del buf0 del primals_2 return (buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((300, 404), (404, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
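# An eager-mode reference for the mm + fused cat above (a sketch, not part of
# the generated file): each of the 4 rows gets 400 ReLU'd first-layer
# activations followed by the 4 raw action features, producing the (4, 404)
# buffer that fc2 consumes.
def _fused_cat_reference(state, action, fcs1_weight, fcs1_bias):
    xs = torch.relu(state @ fcs1_weight.t() + fcs1_bias)  # mm, bias add, ReLU
    return torch.cat((xs, action), dim=1)                 # shape (4, 404)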
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, state, action): """Build a critic (value) network that maps (state, action) pairs -> Q-values.""" xs = F.relu(self.fcs1(state)) x = torch.cat((xs, action), dim=1) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1616 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 404 x1 = xindex // 404 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 400, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (400 * x1 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 404, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-400 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (300, 404), (404, 1)) assert_size_stride(primals_6, (300,), (1,)) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 404), (404, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1616)](buf0, primals_2, primals_4, buf1, 1616, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (404, 300), ( 1, 404), 
0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(1200)](buf3, primals_6, 1200, XBLOCK= 128, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 400), (400, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(1600)](buf0, primals_2, buf6, 1600, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class CriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(CriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.fcs1.weight primals_2 = self.fcs1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
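# A smoke-test sketch for the compiled critic (hypothetical, not part of the
# original repo); assumes a CUDA device. Shapes follow the guards in call():
# state and action batches are (4, 4) and the head returns one Q-value per row.
if torch.cuda.is_available():
    _critic = CriticNew(state_size=4, action_size=4, seed=4).cuda()
    _state = torch.rand(4, 4, device='cuda')
    _action = torch.rand(4, 4, device='cuda')
    assert _critic(_state, _action).shape == (4, 1)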
david-varela/collaboration_and_competition
Critic
false
12,250
[ "MIT" ]
0
a170cc02eb3917af19d6aafa8b37f6089b83c35f
https://github.com/david-varela/collaboration_and_competition/tree/a170cc02eb3917af19d6aafa8b37f6089b83c35f
CuboidPoseHead
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/wy/cwye4gfk4ug6fholrtujqkcyckps5nmpvc74fhtecqjpl343zhyh.py # Topologically Sorted Source Nodes: [x_1, x_2, human_poses], Original ATen: [aten._softmax, aten.mul, aten.sum] # Source node to ATen node mapping: # human_poses => sum_2 # x_1 => div, exp, sum_1 # x_2 => mul_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 4), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {}) triton_poi_fused__softmax_mul_sum_0 = async_compile.triton('triton_poi_fused__softmax_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = 4.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + (x5), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2, human_poses], Original ATen: [aten._softmax, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused__softmax_mul_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
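# Why the fused kernel above is so simple (an eager-mode sketch, not part of
# the generated file): with the (4, 4) toy inputs from get_inputs(), the
# heatmap volume reshapes to (4, 4, 1, 1), so the softmax runs over a
# singleton dimension and is identically 1. That is why the kernel computes
# tmp3 = tmp2 - tmp2 and tmp7 = tmp6 / tmp6, leaving only the broadcasted
# multiply with grid_coordinates.
_heat, _grid = torch.rand(4, 4), torch.rand(4, 4)
_w = torch.softmax(4 * _heat.reshape(4, 4, -1, 1), dim=2)  # all ones
_poses = torch.mul(_w, _grid.unsqueeze(1)).sum(dim=2)      # shape (4, 4, 4)
assert torch.allclose(_poses, _grid.expand(4, 4, 4))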
import torch import torch.nn as nn import torch.nn.functional as F class CuboidPoseHead(nn.Module): def __init__(self, beta): """Get results from the 3D human pose heatmap. Instead of obtaining maximums on the heatmap, this module regresses the coordinates of keypoints via integral pose regression. Refer to the `paper <https://arxiv.org/abs/2004.06239>`_ for more details. Args: beta: Constant to adjust the magnification of the soft-maxed heatmap. """ super(CuboidPoseHead, self).__init__() self.beta = beta self.loss = nn.L1Loss() def forward(self, heatmap_volumes, grid_coordinates): """ Args: heatmap_volumes (torch.Tensor(NxKxLxWxH)): 3D human pose heatmaps predicted by the network. grid_coordinates (torch.Tensor(Nx(LxWxH)x3)): Coordinates of the grids in the heatmap volumes. Returns: human_poses (torch.Tensor(NxKx3)): Coordinates of human poses. """ batch_size = heatmap_volumes.size(0) channel = heatmap_volumes.size(1) x = heatmap_volumes.reshape(batch_size, channel, -1, 1) x = F.softmax(self.beta * x, dim=2) grid_coordinates = grid_coordinates.unsqueeze(1) x = torch.mul(x, grid_coordinates) human_poses = torch.sum(x, dim=2) return human_poses def get_loss(self, preds, targets, weights): return dict(loss_pose=self.loss(preds * weights, targets * weights)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'beta': 4}]
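The forward pass above is a soft-argmax: a beta-scaled softmax over the flattened volume followed by an expectation over the grid coordinates. A minimal standalone sketch of that computation, with hypothetical shapes (N=2, K=3, a 4x4x4 volume) rather than the record's test inputs:

import torch
import torch.nn.functional as F

def soft_argmax(volumes, grid, beta=4.0):
    # volumes: (N, K, L, W, H); grid: (N, L*W*H, 3)
    n, k = volumes.shape[:2]
    x = volumes.reshape(n, k, -1, 1)           # flatten each volume to a vector
    x = F.softmax(beta * x, dim=2)             # attention weights over voxels
    return (x * grid.unsqueeze(1)).sum(dim=2)  # expected coordinates, (N, K, 3)

volumes, grid = torch.rand(2, 3, 4, 4, 4), torch.rand(2, 64, 3)
print(soft_argmax(volumes, grid).shape)        # torch.Size([2, 3, 3])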
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = 4.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x5, tmp9, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class CuboidPoseHeadNew(nn.Module): def __init__(self, beta): """Get results from the 3D human pose heatmap. Instead of obtaining maximums on the heatmap, this module regresses the coordinates of keypoints via integral pose regression. Refer to `paper. <https://arxiv.org/abs/2004.06239>` for more details. Args: beta: Constant to adjust the magnification of soft-maxed heatmap. """ super(CuboidPoseHeadNew, self).__init__() self.beta = beta self.loss = nn.L1Loss() def get_loss(self, preds, targets, weights): return dict(loss_pose=self.loss(preds * weights, targets * weights)) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
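Note that the fused kernel collapses to constants (tmp3 = tmp2 - tmp2 is zero, tmp7 = tmp6 / tmp6 is one) because get_inputs() feeds (4, 4) tensors: after reshape(batch, channel, -1, 1) the softmax dimension has size 1, so Inductor constant-folds the softmax to 1 and every batch item's output is just the grid matrix. A quick eager-mode check of that degenerate case (a sketch, independent of the compiled call):

import torch

hv, grid = torch.rand(4, 4), torch.rand(4, 4)  # shapes from get_inputs()
x = hv.reshape(4, 4, -1, 1)                    # (4, 4, 1, 1): softmax dim has size 1
poses = (torch.softmax(4 * x, dim=2) * grid.unsqueeze(1)).sum(dim=2)
# softmax over a size-1 dim is identically 1, so the heatmap values drop out
assert torch.allclose(poses, grid.unsqueeze(0).expand(4, 4, 4))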
chenxinfeng4/mmpose
CuboidPoseHead
false
12252
[ "Apache-2.0" ]
0
b0aac4178c1f3d679d2a007e1d9c6c567fc2607d
https://github.com/chenxinfeng4/mmpose/tree/b0aac4178c1f3d679d2a007e1d9c6c567fc2607d
Conv1D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/2v/c2vp4wevd4mmk6p3qeilou7zqojjaarvm3pedrgkmrhhjbggkpqu.py # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # input_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_2, %primals_3, [1], [2], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6b/c6b4fzuhqr6po4mr3fkoxz5urvaiarbr7wnj3xgceuajnmdgv2j5.py # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # input_2 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 6) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5l/c5l6jadvodsqyv24syonywbwsyqh7csojyw77cza2reoric5dmsh.py # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # input_4 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 8) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/fd/cfdi66q67cnkwsa7i3ybprhmlrcg4w2wv4blmajgd6coqyavbbof.py # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.tanh, aten.tanh_backward] # Source node to ATen node mapping: # input_6 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%squeeze_2,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul), kwargs = {}) triton_poi_fused_tanh_tanh_backward_3 = async_compile.triton('triton_poi_fused_tanh_tanh_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_tanh_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': 
False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_tanh_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 10) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + (x2), tmp3, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 4, 4, grid=grid(4, 4), stream=stream0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 6), (24, 6, 1)) del buf0 buf2 = reinterpret_tensor(buf1, (4, 6), (6, 1), 0); del buf1 # reuse buf9 = empty_strided_cuda((4, 6), (6, 1), torch.bool) # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf9, 24, grid=grid(24), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 6), (0, 6, 1), 0), primals_4, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 8), (32, 8, 1)) buf4 = reinterpret_tensor(buf3, (4, 8), (8, 1), 0); del buf3 # reuse buf8 = empty_strided_cuda((4, 8), (8, 1), torch.bool) # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf8, 32, grid=grid(32), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 4, 8), (0, 8, 1), 0), primals_6, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf5, (1, 4, 10), (40, 10, 1)) buf6 = 
reinterpret_tensor(buf5, (4, 10), (10, 1), 0); del buf5 # reuse buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.tanh, aten.tanh_backward] triton_poi_fused_tanh_tanh_backward_3.run(buf6, primals_7, buf7, 40, grid=grid(40), stream=stream0) del primals_7 return (reinterpret_tensor(buf6, (10, 4), (1, 10), 0), primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, (1, 4, 4), (4, 1, 4), 0), reinterpret_tensor(buf2, (1, 4, 6), (24, 6, 1), 0), reinterpret_tensor(buf4, (1, 4, 8), (32, 8, 1), 0), buf7, buf8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
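The first kernel, triton_poi_fused_convolution_0, only materializes the transpose from forward(): it reads in_ptr0 at y0 + 4*x1 and writes x1 + 4*y0, i.e. a contiguous copy of embedding.transpose(-2, -1) laid out as (1, C, L) for the convolution. The eager equivalent (a sketch):

import torch

emb = torch.rand(4, 4)                                  # input from get_inputs()
buf0 = emb.transpose(-2, -1).contiguous().unsqueeze(0)  # (1, C, L), what buf0 holds
assert buf0.stride() == (16, 4, 1)                      # matches empty_strided_cuda((1, 4, 4), (16, 4, 1))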
import torch import torch.nn as nn from collections import OrderedDict class Conv1D(nn.Module): def __init__(self, embedding_dim, hidden_dim): super(Conv1D, self).__init__() self.convs = nn.Sequential(OrderedDict([('conv1', nn.Conv1d( embedding_dim, hidden_dim, kernel_size=3, stride=1, padding=2)), ('relu1', nn.ReLU()), ('conv2', nn.Conv1d(hidden_dim, hidden_dim, 3, 1, 2)), ('relu2', nn.ReLU()), ('conv3', nn. Conv1d(hidden_dim, hidden_dim, 3, 1, 2)), ('tanh', nn.Tanh())])) def forward(self, embedding): return self.convs(embedding.transpose(-2, -1)).transpose(-2, -1) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'embedding_dim': 4, 'hidden_dim': 4}]
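The growing sequence lengths in the compiled graph follow from standard Conv1d arithmetic: with kernel_size=3, stride=1, padding=2, L_out = (L_in + 2*2 - 3)/1 + 1 = L_in + 2, so the length-4 input grows to 6, 8, then 10 through the three convolutions, which is exactly why the fused kernels use xnumel of 24, 32, and 40 (4 channels times 6, 8, 10). A quick check (a sketch, separate from the record):

import torch
from torch import nn

def conv1d_out_len(l_in, kernel=3, stride=1, padding=2, dilation=1):
    # Output-length formula from the Conv1d documentation.
    return (l_in + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

assert [conv1d_out_len(l) for l in (4, 6, 8)] == [6, 8, 10]

x = torch.rand(4, 4).transpose(-2, -1).unsqueeze(0)      # (1, C=4, L=4)
for conv in [nn.Conv1d(4, 4, 3, 1, 2) for _ in range(3)]:
    x = conv(x)
print(x.shape)                                           # torch.Size([1, 4, 10])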
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 6 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_tanh_tanh_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(4, 4)](primals_1, buf0, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, 
stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 6), (24, 6, 1)) del buf0 buf2 = reinterpret_tensor(buf1, (4, 6), (6, 1), 0) del buf1 buf9 = empty_strided_cuda((4, 6), (6, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(24)](buf2, primals_3, buf9, 24, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 6 ), (0, 6, 1), 0), primals_4, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 8), (32, 8, 1)) buf4 = reinterpret_tensor(buf3, (4, 8), (8, 1), 0) del buf3 buf8 = empty_strided_cuda((4, 8), (8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(32)](buf4, primals_5, buf8, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_5 buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 4, 8 ), (0, 8, 1), 0), primals_6, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf5, (1, 4, 10), (40, 10, 1)) buf6 = reinterpret_tensor(buf5, (4, 10), (10, 1), 0) del buf5 buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_poi_fused_tanh_tanh_backward_3[grid(40)](buf6, primals_7, buf7, 40, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return reinterpret_tensor(buf6, (10, 4), (1, 10), 0 ), primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, ( 1, 4, 4), (4, 1, 4), 0), reinterpret_tensor(buf2, (1, 4, 6), (24, 6, 1), 0), reinterpret_tensor(buf4, (1, 4, 8), (32, 8, 1), 0 ), buf7, buf8, buf9 class Conv1DNew(nn.Module): def __init__(self, embedding_dim, hidden_dim): super(Conv1DNew, self).__init__() self.convs = nn.Sequential(OrderedDict([('conv1', nn.Conv1d( embedding_dim, hidden_dim, kernel_size=3, stride=1, padding=2)), ('relu1', nn.ReLU()), ('conv2', nn.Conv1d(hidden_dim, hidden_dim, 3, 1, 2)), ('relu2', nn.ReLU()), ('conv3', nn. Conv1d(hidden_dim, hidden_dim, 3, 1, 2)), ('tanh', nn.Tanh())])) def forward(self, input_0): primals_2 = self.convs.conv1.weight primals_3 = self.convs.conv1.bias primals_4 = self.convs.conv2.weight primals_5 = self.convs.conv2.bias primals_6 = self.convs.conv3.weight primals_7 = self.convs.conv3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
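Each triton_poi_fused_relu_threshold_backward_* kernel performs two jobs in one pass: it adds the bias and applies ReLU in place, and it stores the boolean mask relu(x) <= 0 that the backward pass uses to zero gradients. The same pair of outputs in plain PyTorch (a sketch for illustration only):

import torch

def relu_with_backward_mask(x, bias):
    # Mirrors the fused kernel: bias add + ReLU, plus the saved threshold mask.
    out = torch.relu(x + bias.unsqueeze(-1))   # bias broadcast per channel row
    return out, out <= 0                       # True where the gradient is zeroed

x, b = torch.randn(4, 6), torch.randn(4)
out, mask = relu_with_backward_mask(x, b)
assert torch.equal(mask, out == 0)             # ReLU output is >= 0, so <= 0 means == 0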
danielTLevy/PPO-PyTorch
Conv1D
false
12253
[ "MIT" ]
0
e9f5a34d3cf40135dfdb0ddb082c20f5035e23f7
https://github.com/danielTLevy/PPO-PyTorch/tree/e9f5a34d3cf40135dfdb0ddb082c20f5035e23f7
StyledConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ri/criuvsdl3sferb4bb6ci5zaps3wys7xxcpybz7vfo2ba4q7cuq6c.py # Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # demod => rsqrt # mul_2 => mul_2 # pow_1 => pow_1 # sum_1 => sum_1 # weight => mul_3 # weight_1 => mul_4 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.125), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %view_1), kwargs = {}) triton_per_fused_add_mul_pow_rsqrt_sum_2 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = (rindex // 16) x1 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (r5 + (64*x4)), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/m5/cm56eqdvtyi73jnsik4k6g5v62qofgoqvpcasegx6ljukv6gdjyd.py # Topologically Sorted Source Nodes: [mul_5, out_3, add_2, leaky_relu, out_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # add_2 => add_2 # leaky_relu => gt, mul_6, where # mul_5 => mul_5 # out_3 => add_1 # out_4 => mul_7 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %normal_functional), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %mul_5), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %view_5), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_2, 0), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_2, %mul_6), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 
1.4142135623730951), kwargs = {}) triton_poi_fused_add_leaky_relu_mul_3 = async_compile.triton('triton_poi_fused_add_leaky_relu_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_mul_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 25 x2 = (xindex // 100) x1 = (xindex // 25) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + (25*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = 1.4142135623730951 tmp14 = tmp12 * tmp13 tl.store(out_ptr0 + (x3), tmp9, xmask) tl.store(out_ptr1 + (x3), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_6, (1, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 
1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0; del buf0 # reuse buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] triton_per_fused_add_mul_pow_rsqrt_sum_2.run(buf4, primals_5, buf2, buf5, 16, 64, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [noise], Original ATen: [aten.normal_functional] buf8 = torch.ops.aten.normal_functional.default(buf7) del buf7 buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_5, out_3, add_2, leaky_relu, out_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu] triton_poi_fused_add_leaky_relu_mul_3.run(buf6, primals_6, buf9, primals_7, buf10, buf11, 400, grid=grid(400), stream=stream0) del buf6 del primals_6 del primals_7 return (buf11, primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf9, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
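The last fused kernel in this graph, triton_poi_fused_add_leaky_relu_mul_3, combines noise injection with the FusedLeakyReLU activation: out = leaky_relu(conv_out + weight * noise + bias, 0.2) * sqrt(2), while out_ptr0 records the positive mask for the backward pass. As plain tensor ops (a sketch with the record's shapes):

import math
import torch
import torch.nn.functional as F

conv_out = torch.randn(4, 4, 5, 5)
noise = torch.randn(4, 1, 5, 5)              # one noise map per sample, as in buf9
w = torch.zeros(1)                           # NoiseInjection weight
bias = torch.zeros(4).view(1, -1, 1, 1)      # FusedLeakyReLU bias per channel

pre = conv_out + w * noise + bias
out = F.leaky_relu(pre, negative_slope=0.2) * math.sqrt(2)
mask = pre > 0                               # what the kernel stores in out_ptr0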
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope) * scale else: return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1 ], pad[0], pad[1]) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class FusedLeakyReLU(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) self.negative_slope = negative_slope self.scale = scale def forward(self, input): return fused_leaky_relu(input, self.bias, self.negative_slope, self .scale) class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size 
self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class NoiseInjection(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, image, noise=None): if noise is None: batch, _, height, width = image.shape noise = image.new_empty(batch, 1, height, width).normal_() return image + self.weight * noise class StyledConv(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True): super().__init__() self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size, style_dim, upsample=upsample, blur_kernel=blur_kernel, demodulate=demodulate) self.noise = NoiseInjection() self.activate = FusedLeakyReLU(out_channel) def forward(self, input, style, noise=None): out = self.conv(input, style) out = self.noise(out, noise=noise) out = self.activate(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
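The kernel triton_per_fused_add_mul_pow_rsqrt_sum_2 in the compiled output fuses exactly the demodulation step of ModulatedConv2d.forward above: scale the weight (0.125 = 1/sqrt(in_channel * kernel_size**2)), modulate by style, then multiply by rsqrt of the per-(batch, out-channel) sum of squares plus eps. In plain PyTorch, with the record's shapes (a sketch):

import torch

batch, out_ch, in_ch, k = 4, 4, 4, 4
weight = torch.randn(1, out_ch, in_ch, k, k)
style = torch.randn(batch, 1, in_ch, 1, 1)    # output of the modulation layer
scale = 0.125                                 # 1 / sqrt(4 * 4**2)

w = scale * weight * style                    # modulate: (batch, out, in, k, k)
demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + 1e-08)
w = w * demod.view(batch, out_ch, 1, 1, 1)    # demodulate per (batch, out_ch)
print(w.view(batch * out_ch, in_ch, k, k).shape)  # grouped-conv weight layout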
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_mul_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 25 x2 = xindex // 100 x1 = xindex // 25 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + 25 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = 1.4142135623730951 tmp14 = tmp12 * tmp13 tl.store(out_ptr0 + x3, tmp9, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=1, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32) buf8 = torch.ops.aten.normal_functional.default(buf7) del buf7 buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32 ) triton_poi_fused_add_leaky_relu_mul_3[grid(400)](buf6, primals_6, buf9, primals_7, buf10, buf11, 400, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_6 del primals_7 return buf11, primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf9, buf10 def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope) * scale else: return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = upfirdn2d_native(input, kernel, up, up, down, down, 
pad[0], pad[1 ], pad[0], pad[1]) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class FusedLeakyReLU(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) self.negative_slope = negative_slope self.scale = scale def forward(self, input): return fused_leaky_relu(input, self.bias, self.negative_slope, self .scale) class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class NoiseInjection(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, image, noise=None): if noise is None: batch, _, height, width = image.shape noise = image.new_empty(batch, 1, height, width).normal_() return image + self.weight * noise class StyledConvNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True): super().__init__() self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size, style_dim, upsample=upsample, blur_kernel=blur_kernel, demodulate=demodulate) self.noise = NoiseInjection() self.activate = FusedLeakyReLU(out_channel) def forward(self, input_0, input_1): primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_6 = self.noise.weight primals_7 = self.activate.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
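# Added usage sketch (an assumption, not part of the original entry): smoke-test
# the compiled StyledConvNew wrapper above. The assert_size_stride guards in
# call() pin every shape, so in_channel = out_channel = kernel_size =
# style_dim = 4 here, and the compiled graph itself is CUDA-only.
if torch.cuda.is_available():
    module = StyledConvNew(in_channel=4, out_channel=4, kernel_size=4,
        style_dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    style = torch.rand(4, 4, device='cuda')
    out = module(x, style)
    assert out.shape == (4, 4, 5, 5)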
davidetalon/StyleCLIP
StyledConv
false
12254
[ "MIT" ]
0
1cbf552b322cd90c417f26a259143382e2b7af8f
https://github.com/davidetalon/StyleCLIP/tree/1cbf552b322cd90c417f26a259143382e2b7af8f
NegativeScaledDotProduct
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/u5/cu5u6mji5tmuwjwnao33ksrbwyr5f7vlre4ywnbhgkfwjxaua7x7.py # Topologically Sorted Source Nodes: [neg, sqrt_d, truediv], Original ATen: [aten.neg, aten.sqrt, aten.div] # Source node to ATen node mapping: # neg => neg # sqrt_d => full_default # truediv => div # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mm,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %full_default), kwargs = {}) triton_poi_fused_div_neg_sqrt_0 = async_compile.triton('triton_poi_fused_div_neg_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_neg_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_neg_sqrt_0(in_out_ptr0, xnumel, 
XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = -tmp0 tmp2 = 0.5 tmp3 = tmp1 * tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [neg, sqrt_d, truediv], Original ATen: [aten.neg, aten.sqrt, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_neg_sqrt_0.run(buf1, 16, grid=grid(16), stream=stream0) return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data.dataloader import torch.nn def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class NegativeScaledDotProduct(torch.nn.Module): def forward(self, a, b): sqrt_d = torch.sqrt(torch.tensor(a.size(-1))) return -dot_product(a, b, normalize=False) / sqrt_d def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
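# Added usage sketch (not from the source): evaluate the eager module on the
# sample inputs above. With d = a.size(-1) = 4, the result equals -a @ b.T / 2.
model = NegativeScaledDotProduct()
a, b = get_inputs()
out = model(a, b)
assert out.shape == (4, 4)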
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_neg_sqrt_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = -tmp0 tmp2 = 0.5 tmp3 = tmp1 * tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_neg_sqrt_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class NegativeScaledDotProductNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
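# Added parity check (an assumed harness, not from the source): on a CUDA
# device the compiled wrapper should reproduce the eager formula
# -a @ b.T / sqrt(d) up to floating-point tolerance.
if torch.cuda.is_available():
    a = torch.rand(4, 4, device='cuda')
    b = torch.rand(4, 4, device='cuda')
    ref = -torch.mm(a, b.t()) / 2.0
    out = NegativeScaledDotProductNew()(a, b)
    torch.testing.assert_close(out, ref)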
chen-yuxuan/flair
NegativeScaledDotProduct
false
12255
[ "MIT" ]
0
480d2c9afd66ab8d3bf40a676917e84dba3c4cee
https://github.com/chen-yuxuan/flair/tree/480d2c9afd66ab8d3bf40a676917e84dba3c4cee
GAT
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/i4/ci4j7o62hjlvxysby5leuec4f5mnobz3p5wi5zmgnb6pfgczycms.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_2, %repeat_1], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*((((4*x1) + x0) // 16) % 4)) + 
(16*((((4*x1) + (64*x2) + x0) // 64) % 4)) + ((((4*x1) + x0) % 16) % 4)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr0 + ((4*(x1 % 4)) + (16*x2) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/fy/cfyhpfvlh7v2kamyddf44ycfki2eygiwxnllf3xlbccy7vzxtcnc.py # Topologically Sorted Source Nodes: [e], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # e => gt # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze, 0), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/c3/cc3jesmqsfkxzdmzwd3u5t52xvkpzl4rtjwuve7z2oe4uqfzknpd.py # Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt] # Source node to ATen node mapping: # gt => gt_1 # Graph fragment: # %gt_1 : [num_users=5] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_4, 0), kwargs = {}) triton_poi_fused_gt_2 = async_compile.triton('triton_poi_fused_gt_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/f5/cf5atbk7j66ttv4hfabzy4hlvzlgp4cmnvsrrdv7lu2mgbytrlxj.py # Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax] # Source node to ATen node mapping: # attention => where_1 # attention_1 => amax, exp, sub, sum_1 # attention_10 => amax_3, exp_3, sub_3, sum_4 # attention_3 => where_4 # attention_4 => amax_1, exp_1, sub_1, sum_2 # attention_6 => where_7 # attention_7 => amax_2, exp_2, sub_2, sum_3 # attention_9 => where_10 # e => mul, where # e_1 => mul_5, where_3 # e_2 => mul_10, where_6 # e_3 => mul_15, where_9 # zero_vec => full_default # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {}) # %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 4), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %squeeze_1, %mul_5), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_4, [1], True), kwargs = {}) # %sub_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 4), kwargs = {}) # %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %squeeze_2, %mul_10), kwargs = {}) # %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {}) # %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_7, [1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {}) # %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 4), kwargs = {}) # %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %squeeze_3, %mul_15), kwargs = {}) # %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {}) # %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_10, [1], True), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {}) # %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [1], True), kwargs = {}) triton_poi_fused__softmax_leaky_relu_mul_where_3 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: '*i1', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 36, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask).to(tl.int1) tmp9 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr2 + (16 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask).to(tl.int1) tmp16 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr2 + (32 + x0), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask).to(tl.int1) tmp23 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp24 = tl.load(in_ptr2 + (48 + x0), xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp41 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + (16 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp46 = tl.load(in_ptr4 + (16 + x0), xmask, eviction_policy='evict_last') tmp51 = tl.load(in_ptr3 + (32 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp52 = tl.load(in_ptr4 + (32 + x0), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr3 + (48 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp58 = tl.load(in_ptr4 + (48 + x0), xmask, eviction_policy='evict_last') tmp74 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp75 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp79 = tl.load(in_ptr5 + (16 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp80 = tl.load(in_ptr6 + (16 + x0), xmask, eviction_policy='evict_last') tmp85 = tl.load(in_ptr5 + (32 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp86 = tl.load(in_ptr6 + (32 + x0), xmask, eviction_policy='evict_last') tmp91 = tl.load(in_ptr5 + (48 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp92 = tl.load(in_ptr6 + (48 + x0), xmask, eviction_policy='evict_last') tmp108 = tl.load(in_ptr7 + (x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp109 = tl.load(in_ptr8 + (x0), xmask, eviction_policy='evict_last') tmp113 = tl.load(in_ptr7 + (16 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp114 = tl.load(in_ptr8 + (16 + x0), xmask, eviction_policy='evict_last') tmp119 = tl.load(in_ptr7 + (32 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp120 = tl.load(in_ptr8 + (32 + x0), xmask, eviction_policy='evict_last') tmp125 = tl.load(in_ptr7 + (48 + x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp126 = tl.load(in_ptr8 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp11 = 
tmp10 * tmp3 tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tl.where(tmp8, tmp12, tmp6) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp17 * tmp3 tmp19 = tl.where(tmp16, tmp17, tmp18) tmp20 = tl.where(tmp15, tmp19, tmp6) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp22, tmp26, tmp6) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tmp42 = tmp41 * tmp3 tmp43 = tl.where(tmp40, tmp41, tmp42) tmp44 = tl.where(tmp0, tmp43, tmp6) tmp47 = tmp46 * tmp3 tmp48 = tl.where(tmp45, tmp46, tmp47) tmp49 = tl.where(tmp8, tmp48, tmp6) tmp50 = triton_helpers.maximum(tmp44, tmp49) tmp53 = tmp52 * tmp3 tmp54 = tl.where(tmp51, tmp52, tmp53) tmp55 = tl.where(tmp15, tmp54, tmp6) tmp56 = triton_helpers.maximum(tmp50, tmp55) tmp59 = tmp58 * tmp3 tmp60 = tl.where(tmp57, tmp58, tmp59) tmp61 = tl.where(tmp22, tmp60, tmp6) tmp62 = triton_helpers.maximum(tmp56, tmp61) tmp63 = tmp44 - tmp62 tmp64 = tl_math.exp(tmp63) tmp65 = tmp49 - tmp62 tmp66 = tl_math.exp(tmp65) tmp67 = tmp64 + tmp66 tmp68 = tmp55 - tmp62 tmp69 = tl_math.exp(tmp68) tmp70 = tmp67 + tmp69 tmp71 = tmp61 - tmp62 tmp72 = tl_math.exp(tmp71) tmp73 = tmp70 + tmp72 tmp76 = tmp75 * tmp3 tmp77 = tl.where(tmp74, tmp75, tmp76) tmp78 = tl.where(tmp0, tmp77, tmp6) tmp81 = tmp80 * tmp3 tmp82 = tl.where(tmp79, tmp80, tmp81) tmp83 = tl.where(tmp8, tmp82, tmp6) tmp84 = triton_helpers.maximum(tmp78, tmp83) tmp87 = tmp86 * tmp3 tmp88 = tl.where(tmp85, tmp86, tmp87) tmp89 = tl.where(tmp15, tmp88, tmp6) tmp90 = triton_helpers.maximum(tmp84, tmp89) tmp93 = tmp92 * tmp3 tmp94 = tl.where(tmp91, tmp92, tmp93) tmp95 = tl.where(tmp22, tmp94, tmp6) tmp96 = triton_helpers.maximum(tmp90, tmp95) tmp97 = tmp78 - tmp96 tmp98 = tl_math.exp(tmp97) tmp99 = tmp83 - tmp96 tmp100 = tl_math.exp(tmp99) tmp101 = tmp98 + tmp100 tmp102 = tmp89 - tmp96 tmp103 = tl_math.exp(tmp102) tmp104 = tmp101 + tmp103 tmp105 = tmp95 - tmp96 tmp106 = tl_math.exp(tmp105) tmp107 = tmp104 + tmp106 tmp110 = tmp109 * tmp3 tmp111 = tl.where(tmp108, tmp109, tmp110) tmp112 = tl.where(tmp0, tmp111, tmp6) tmp115 = tmp114 * tmp3 tmp116 = tl.where(tmp113, tmp114, tmp115) tmp117 = tl.where(tmp8, tmp116, tmp6) tmp118 = triton_helpers.maximum(tmp112, tmp117) tmp121 = tmp120 * tmp3 tmp122 = tl.where(tmp119, tmp120, tmp121) tmp123 = tl.where(tmp15, tmp122, tmp6) tmp124 = triton_helpers.maximum(tmp118, tmp123) tmp127 = tmp126 * tmp3 tmp128 = tl.where(tmp125, tmp126, tmp127) tmp129 = tl.where(tmp22, tmp128, tmp6) tmp130 = triton_helpers.maximum(tmp124, tmp129) tmp131 = tmp112 - tmp130 tmp132 = tl_math.exp(tmp131) tmp133 = tmp117 - tmp130 tmp134 = tl_math.exp(tmp133) tmp135 = tmp132 + tmp134 tmp136 = tmp123 - tmp130 tmp137 = tl_math.exp(tmp136) tmp138 = tmp135 + tmp137 tmp139 = tmp129 - tmp130 tmp140 = tl_math.exp(tmp139) tmp141 = tmp138 + tmp140 tl.store(out_ptr0 + (x2), tmp28, xmask) tl.store(out_ptr1 + (x2), tmp39, xmask) tl.store(out_ptr2 + (x2), tmp62, xmask) tl.store(out_ptr3 + (x2), tmp73, xmask) tl.store(out_ptr4 + (x2), tmp96, xmask) tl.store(out_ptr5 + (x2), tmp107, xmask) tl.store(out_ptr6 + (x2), tmp130, xmask) tl.store(out_ptr7 + (x2), tmp141, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/lu/cludl62f6qo4f34m7ejzpxdcunrj3shfeb7xo4jvuj3n5a4sh6a6.py # 
Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax] # Source node to ATen node mapping: # attention => where_1 # attention_1 => div, exp, sub # attention_10 => div_3, exp_3, sub_3 # attention_3 => where_4 # attention_4 => div_1, exp_1, sub_1 # attention_6 => where_7 # attention_7 => div_2, exp_2, sub_2 # attention_9 => where_10 # e => mul, where # e_1 => mul_5, where_3 # e_2 => mul_10, where_6 # e_3 => mul_15, where_9 # zero_vec => full_default # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {}) # %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 4), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %squeeze_1, %mul_5), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 4), kwargs = {}) # %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %squeeze_2, %mul_10), kwargs = {}) # %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {}) # %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_2, %sum_3), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 4), kwargs = {}) # %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %squeeze_3, %mul_15), kwargs = {}) # %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {}) # %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = 
{}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_4), kwargs = {}) triton_poi_fused__softmax_leaky_relu_mul_where_4 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*i1', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*i1', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last').to(tl.int1) tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x4), xmask, eviction_policy='evict_last').to(tl.int1) tmp14 = tl.load(in_ptr6 + (x4), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr7 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr8 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr9 + (x4), xmask, eviction_policy='evict_last').to(tl.int1) tmp24 = tl.load(in_ptr10 + (x4), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr11 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr12 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr13 + (x4), xmask, eviction_policy='evict_last').to(tl.int1) tmp34 = tl.load(in_ptr14 + (x4), xmask, eviction_policy='evict_last') 
tmp38 = tl.load(in_ptr15 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr16 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tmp15 = tmp14 * tmp3 tmp16 = tl.where(tmp13, tmp14, tmp15) tmp17 = tl.where(tmp0, tmp16, tmp6) tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp22 = tmp20 / tmp21 tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp0, tmp26, tmp6) tmp29 = tmp27 - tmp28 tmp30 = tl_math.exp(tmp29) tmp32 = tmp30 / tmp31 tmp35 = tmp34 * tmp3 tmp36 = tl.where(tmp33, tmp34, tmp35) tmp37 = tl.where(tmp0, tmp36, tmp6) tmp39 = tmp37 - tmp38 tmp40 = tl_math.exp(tmp39) tmp42 = tmp40 / tmp41 tl.store(out_ptr0 + (x3), tmp12, xmask) tl.store(out_ptr1 + (x3), tmp22, xmask) tl.store(out_ptr2 + (x3), tmp32, xmask) tl.store(out_ptr3 + (x3), tmp42, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mu/cmu4dmnjb3m2bpsy345zghpbe6uqogqd4h7akjugnavnr5t7dfhe.py # Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.clone] # Source node to ATen node mapping: # h_prime => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/p6/cp6dbvqyf5xgpxxdiwciohgb2ayhhg4kfwvauizjk22u4hoilsvn.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_1 => cat_4 # Graph fragment: # %cat_4 : [num_users=1] = 
call_function[target=torch.ops.aten.cat.default](args = ([%where_2, %where_5, %where_8, %where_11], 1), kwargs = {}) triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 16 x0 = xindex % 16 x2 = (xindex // 256) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp18 & xmask, other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp30 & xmask, other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tmp40 = tl.full([1], 16, tl.int64) tmp41 = tmp0 < tmp40 tmp42 = tl.load(in_ptr3 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp39 & xmask, other=0.0) tmp43 = tmp42 > tmp6 tmp44 = tmp42 * tmp8 tmp45 = libdevice.expm1(tmp44) tmp46 = tmp45 * tmp8 tmp47 = tl.where(tmp43, tmp44, tmp46) tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp39, tmp47, tmp48) tmp50 = 
tl.where(tmp30, tmp38, tmp49) tmp51 = tl.where(tmp18, tmp26, tmp50) tmp52 = tl.where(tmp4, tmp14, tmp51) tl.store(out_ptr0 + (x3), tmp52, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (8, 1), (1, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (8, 1), (1, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (8, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (8, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, buf1, 512, grid=grid(512), stream=stream0) buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), primals_3, out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [e], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt] triton_poi_fused_gt_2.run(primals_4, buf4, 256, grid=grid(256), stream=stream0) del primals_4 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf10) del primals_5 buf11 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf10, buf11, 512, grid=grid(512), stream=stream0) buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (64, 8), (8, 1), 0), primals_6, out=buf12) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [e_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_7, out=buf19) del primals_7 buf20 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf19, buf20, 512, grid=grid(512), stream=stream0) buf21 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf20, 
(64, 8), (8, 1), 0), primals_8, out=buf21) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [e_2], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf21, buf22, 64, grid=grid(64), stream=stream0) buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_9, out=buf28) del primals_9 buf29 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf28, buf29, 512, grid=grid(512), stream=stream0) buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf29, (64, 8), (8, 1), 0), primals_10, out=buf30) buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [e_3], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf30, buf31, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf23 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf24 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf32 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf33 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax] triton_poi_fused__softmax_leaky_relu_mul_where_3.run(buf4, buf3, buf2, buf13, buf12, buf22, buf21, buf31, buf30, buf5, buf6, buf14, buf15, buf23, buf24, buf32, buf33, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf34 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax] triton_poi_fused__softmax_leaky_relu_mul_where_4.run(buf4, buf3, buf2, buf5, buf6, buf13, buf12, buf14, buf15, buf22, buf21, buf23, buf24, buf31, buf30, buf32, buf33, buf7, buf16, buf25, buf34, 256, grid=grid(256), stream=stream0) del buf12 del buf14 del buf15 del buf2 del buf21 del buf23 del buf24 del buf30 del buf32 del buf33 del buf5 del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf0, buf8, 256, grid=grid(256), stream=stream0) del buf0 buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), 
out=buf9) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime_1], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf10, buf17, 256, grid=grid(256), stream=stream0) del buf10 buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0), out=buf18) buf26 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime_2], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf19, buf26, 256, grid=grid(256), stream=stream0) del buf19 buf27 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), out=buf27) buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime_3], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf28, buf35, 256, grid=grid(256), stream=stream0) del buf28 buf36 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf34, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36) buf37 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] triton_poi_fused_cat_6.run(buf9, buf18, buf27, buf36, buf37, 1024, grid=grid(1024), stream=stream0) return (buf37, buf3, buf4, buf7, buf9, buf13, buf16, buf18, buf22, buf25, buf27, buf31, buf34, buf36, reinterpret_tensor(buf35, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf29, (8, 64), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf20, (8, 64), (1, 8), 0), reinterpret_tensor(primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf11, (8, 64), (1, 8), 0), reinterpret_tensor(primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (8, 64), (1, 8), 0), reinterpret_tensor(primals_3, (1, 8), (1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: 
call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
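The fused kernels launched above (triton_poi_fused__softmax_leaky_relu_mul_where_3 and _4) split a numerically stable softmax into two passes: the first computes each row's maximum and the sum of shifted exponentials, the second normalizes. With m the row maximum,

$$\mathrm{softmax}(x)_i = \frac{e^{x_i - m}}{\sum_j e^{x_j - m}}, \qquad m = \max_j x_j,$$

which equals the naive softmax algebraically but keeps exp in range; after the shift, positions filled with the large negative mask value (-8999999815811072.0) underflow to zero whenever the row contains at least one unmasked entry.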
import torch import torch.nn as nn import torch.nn.functional as F class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.matmul(input, self.W) N = h.size()[1] batch_size = h.size(0) a_input = torch.cat([h.repeat(1, 1, N).view(batch_size, N * N, -1), h.repeat(1, N, 1)], dim=2).view(batch_size, N, -1, 2 * self. out_features) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GAT(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GAT, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, x, adj): x = F.dropout(x, self.dropout, training=self.training) x = torch.cat([att(x, adj) for att in self.attentions], dim=1) x = F.dropout(x, self.dropout, training=self.training) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
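For reference, the eager forward above follows the GAT attention of Veličković et al. (https://arxiv.org/abs/1710.10903): with $W h_i$ the transformed node features and $\Vert$ concatenation,

$$e_{ij} = \mathrm{LeakyReLU}\!\left(a^{\top} [\, W h_i \,\Vert\, W h_j \,]\right), \qquad \alpha_{ij} = \frac{\exp(e_{ij})}{\sum_{k \in \mathcal{N}_i} \exp(e_{ik})}, \qquad h_i' = \mathrm{ELU}\Big(\sum_{j} \alpha_{ij} W h_j\Big),$$

where the neighborhood restriction is implemented here by filling non-edges (adj <= 0) with -9e15 before the softmax, and the ELU is applied only when concat=True.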
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 16 x2 = xindex // 128 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * ((4 * x1 + x0) // 16 % 4) + 16 * ((4 * x1 + 64 * x2 + x0) // 64 % 4) + (4 * x1 + x0) % 16 % 4), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * x2 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask).to(tl.int1) tmp9 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp10 = tl.load(in_ptr2 + (16 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask).to(tl.int1) tmp16 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr2 + (32 + x0), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask).to(tl.int1) tmp23 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp24 = tl.load(in_ptr2 + (48 + x0), xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp41 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + (16 + x0), xmask, 
eviction_policy='evict_last' ).to(tl.int1) tmp46 = tl.load(in_ptr4 + (16 + x0), xmask, eviction_policy='evict_last') tmp51 = tl.load(in_ptr3 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp52 = tl.load(in_ptr4 + (32 + x0), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr3 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp58 = tl.load(in_ptr4 + (48 + x0), xmask, eviction_policy='evict_last') tmp74 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp75 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp79 = tl.load(in_ptr5 + (16 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp80 = tl.load(in_ptr6 + (16 + x0), xmask, eviction_policy='evict_last') tmp85 = tl.load(in_ptr5 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp86 = tl.load(in_ptr6 + (32 + x0), xmask, eviction_policy='evict_last') tmp91 = tl.load(in_ptr5 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp92 = tl.load(in_ptr6 + (48 + x0), xmask, eviction_policy='evict_last') tmp108 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp109 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last') tmp113 = tl.load(in_ptr7 + (16 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp114 = tl.load(in_ptr8 + (16 + x0), xmask, eviction_policy='evict_last') tmp119 = tl.load(in_ptr7 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp120 = tl.load(in_ptr8 + (32 + x0), xmask, eviction_policy='evict_last') tmp125 = tl.load(in_ptr7 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp126 = tl.load(in_ptr8 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp11 = tmp10 * tmp3 tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tl.where(tmp8, tmp12, tmp6) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp17 * tmp3 tmp19 = tl.where(tmp16, tmp17, tmp18) tmp20 = tl.where(tmp15, tmp19, tmp6) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp22, tmp26, tmp6) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tmp42 = tmp41 * tmp3 tmp43 = tl.where(tmp40, tmp41, tmp42) tmp44 = tl.where(tmp0, tmp43, tmp6) tmp47 = tmp46 * tmp3 tmp48 = tl.where(tmp45, tmp46, tmp47) tmp49 = tl.where(tmp8, tmp48, tmp6) tmp50 = triton_helpers.maximum(tmp44, tmp49) tmp53 = tmp52 * tmp3 tmp54 = tl.where(tmp51, tmp52, tmp53) tmp55 = tl.where(tmp15, tmp54, tmp6) tmp56 = triton_helpers.maximum(tmp50, tmp55) tmp59 = tmp58 * tmp3 tmp60 = tl.where(tmp57, tmp58, tmp59) tmp61 = tl.where(tmp22, tmp60, tmp6) tmp62 = triton_helpers.maximum(tmp56, tmp61) tmp63 = tmp44 - tmp62 tmp64 = tl_math.exp(tmp63) tmp65 = tmp49 - tmp62 tmp66 = tl_math.exp(tmp65) tmp67 = tmp64 + tmp66 tmp68 = tmp55 - tmp62 tmp69 = tl_math.exp(tmp68) tmp70 = tmp67 + tmp69 tmp71 = tmp61 - tmp62 tmp72 = tl_math.exp(tmp71) tmp73 = tmp70 + tmp72 tmp76 = tmp75 * tmp3 tmp77 = tl.where(tmp74, tmp75, tmp76) tmp78 = tl.where(tmp0, tmp77, tmp6) tmp81 = tmp80 * tmp3 tmp82 = tl.where(tmp79, tmp80, tmp81) tmp83 = tl.where(tmp8, tmp82, tmp6) tmp84 = triton_helpers.maximum(tmp78, tmp83) tmp87 = tmp86 * tmp3 tmp88 
= tl.where(tmp85, tmp86, tmp87) tmp89 = tl.where(tmp15, tmp88, tmp6) tmp90 = triton_helpers.maximum(tmp84, tmp89) tmp93 = tmp92 * tmp3 tmp94 = tl.where(tmp91, tmp92, tmp93) tmp95 = tl.where(tmp22, tmp94, tmp6) tmp96 = triton_helpers.maximum(tmp90, tmp95) tmp97 = tmp78 - tmp96 tmp98 = tl_math.exp(tmp97) tmp99 = tmp83 - tmp96 tmp100 = tl_math.exp(tmp99) tmp101 = tmp98 + tmp100 tmp102 = tmp89 - tmp96 tmp103 = tl_math.exp(tmp102) tmp104 = tmp101 + tmp103 tmp105 = tmp95 - tmp96 tmp106 = tl_math.exp(tmp105) tmp107 = tmp104 + tmp106 tmp110 = tmp109 * tmp3 tmp111 = tl.where(tmp108, tmp109, tmp110) tmp112 = tl.where(tmp0, tmp111, tmp6) tmp115 = tmp114 * tmp3 tmp116 = tl.where(tmp113, tmp114, tmp115) tmp117 = tl.where(tmp8, tmp116, tmp6) tmp118 = triton_helpers.maximum(tmp112, tmp117) tmp121 = tmp120 * tmp3 tmp122 = tl.where(tmp119, tmp120, tmp121) tmp123 = tl.where(tmp15, tmp122, tmp6) tmp124 = triton_helpers.maximum(tmp118, tmp123) tmp127 = tmp126 * tmp3 tmp128 = tl.where(tmp125, tmp126, tmp127) tmp129 = tl.where(tmp22, tmp128, tmp6) tmp130 = triton_helpers.maximum(tmp124, tmp129) tmp131 = tmp112 - tmp130 tmp132 = tl_math.exp(tmp131) tmp133 = tmp117 - tmp130 tmp134 = tl_math.exp(tmp133) tmp135 = tmp132 + tmp134 tmp136 = tmp123 - tmp130 tmp137 = tl_math.exp(tmp136) tmp138 = tmp135 + tmp137 tmp139 = tmp129 - tmp130 tmp140 = tl_math.exp(tmp139) tmp141 = tmp138 + tmp140 tl.store(out_ptr0 + x2, tmp28, xmask) tl.store(out_ptr1 + x2, tmp39, xmask) tl.store(out_ptr2 + x2, tmp62, xmask) tl.store(out_ptr3 + x2, tmp73, xmask) tl.store(out_ptr4 + x2, tmp96, xmask) tl.store(out_ptr5 + x2, tmp107, xmask) tl.store(out_ptr6 + x2, tmp130, xmask) tl.store(out_ptr7 + x2, tmp141, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr5 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp14 = tl.load(in_ptr6 + x4, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr7 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr8 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr9 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp24 = tl.load(in_ptr10 + x4, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr11 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp31 = tl.load(in_ptr12 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp33 = tl.load(in_ptr13 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp34 = tl.load(in_ptr14 + x4, xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr15 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp41 = tl.load(in_ptr16 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = 
tl.where(tmp0, tmp5, tmp6) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tmp15 = tmp14 * tmp3 tmp16 = tl.where(tmp13, tmp14, tmp15) tmp17 = tl.where(tmp0, tmp16, tmp6) tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp22 = tmp20 / tmp21 tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp0, tmp26, tmp6) tmp29 = tmp27 - tmp28 tmp30 = tl_math.exp(tmp29) tmp32 = tmp30 / tmp31 tmp35 = tmp34 * tmp3 tmp36 = tl.where(tmp33, tmp34, tmp35) tmp37 = tl.where(tmp0, tmp36, tmp6) tmp39 = tmp37 - tmp38 tmp40 = tl_math.exp(tmp39) tmp42 = tmp40 / tmp41 tl.store(out_ptr0 + x3, tmp12, xmask) tl.store(out_ptr1 + x3, tmp22, xmask) tl.store(out_ptr2 + x3, tmp32, xmask) tl.store(out_ptr3 + x3, tmp42, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp18 & xmask, other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp30 & xmask, other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tl.full([1], 16, tl.int64) tmp42 = tl.load(in_ptr3 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp39 & xmask, other=0.0) tmp43 = tmp42 > tmp6 tmp44 = tmp42 * tmp8 tmp45 = libdevice.expm1(tmp44) tmp46 = tmp45 * tmp8 tmp47 = tl.where(tmp43, tmp44, tmp46) tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp39, tmp47, tmp48) tmp50 = tl.where(tmp30, tmp38, tmp49) tmp51 = tl.where(tmp18, tmp26, tmp50) tmp52 = tl.where(tmp4, tmp14, tmp51) tl.store(out_ptr0 + x3, tmp52, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (8, 1), (1, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (8, 1), (1, 1)) 
assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (8, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (8, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](buf0, buf1, 512, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), primals_3, out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_gt_2[grid(256)](primals_4, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf10) del primals_5 buf11 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_0[grid(512)](buf10, buf11, 512, XBLOCK=256, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (64, 8), (8, 1), 0), primals_6, out=buf12) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_7, out=buf19) del primals_7 buf20 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_0[grid(512)](buf19, buf20, 512, XBLOCK=256, num_warps=4, num_stages=1) buf21 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf20, (64, 8), (8, 1), 0), primals_8, out=buf21) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf21, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_9, out=buf28) del primals_9 buf29 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_0[grid(512)](buf28, buf29, 512, XBLOCK=256, num_warps=4, num_stages=1) buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf29, (64, 8), (8, 1), 0), primals_10, out=buf30) buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf30, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf23 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf24 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf32 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf33 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) 
triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(64)](buf4, buf3, buf2, buf13, buf12, buf22, buf21, buf31, buf30, buf5, buf6, buf14, buf15, buf23, buf24, buf32, buf33, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf34 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_leaky_relu_mul_where_4[grid(256)](buf4, buf3, buf2, buf5, buf6, buf13, buf12, buf14, buf15, buf22, buf21, buf23, buf24, buf31, buf30, buf32, buf33, buf7, buf16, buf25, buf34, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf12 del buf14 del buf15 del buf2 del buf21 del buf23 del buf24 del buf30 del buf32 del buf33 del buf5 del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf0, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf10, buf17, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf10 buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0), out=buf18 ) buf26 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf19, buf26, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf19 buf27 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), out=buf27 ) buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf28, buf35, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf28 buf36 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf34, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36 ) buf37 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_6[grid(1024)](buf9, buf18, buf27, buf36, buf37, 1024, XBLOCK=128, num_warps=4, num_stages=1) return (buf37, buf3, buf4, buf7, buf9, buf13, buf16, buf18, buf22, buf25, buf27, buf31, buf34, buf36, reinterpret_tensor(buf35, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf29, (8, 64), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf20, (8, 64), (1, 8), 0), reinterpret_tensor( primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf11, (8, 64), (1, 8), 0), reinterpret_tensor(primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (8, 64), (1, 8), 0), reinterpret_tensor( primals_3, (1, 8), (1, 1), 0)) class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.matmul(input, self.W) N = h.size()[1] batch_size = h.size(0) a_input = torch.cat([h.repeat(1, 1, N).view(batch_size, N * N, -1), h.repeat(1, N, 1)], dim=2).view(batch_size, N, -1, 2 * self. out_features) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATNew(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GATNew, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, input_0, input_1): primals_2 = self.attention_0.W primals_3 = self.attention_0.a primals_5 = self.attention_1.W primals_6 = self.attention_1.a primals_7 = self.attention_2.W primals_8 = self.attention_2.a primals_9 = self.attention_3.W primals_10 = self.attention_3.a primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
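A minimal smoke test for the compiled wrapper above; this is a sketch, not part of the source, and it assumes a CUDA device, with shapes taken from get_inputs()/get_init_inputs():

import torch

# Hypothetical check: in eval mode the eager GAT and the compiled GATNew
# should agree, since the traced graph above contains no dropout kernels.
torch.manual_seed(0)
model = GATNew(nfeat=4, nhid=4, dropout=0.5, alpha=4, nheads=4).cuda().eval()
x = torch.rand(4, 4, 4, device='cuda')        # (batch, N, nfeat)
adj = torch.rand(4, 4, 4, 4, device='cuda')   # edge scores; adj > 0 marks an edge
out = model(x, adj)
print(out.shape)  # (4, 16, 4, 4): four heads concatenated on dim 1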
daiki-kimura/commonsense-rl
GAT
false
12256
[ "Apache-2.0" ]
0
5513926957b6501ce9cfa46f77f8f2c1c4892fa5
https://github.com/daiki-kimura/commonsense-rl/tree/5513926957b6501ce9cfa46f77f8f2c1c4892fa5
FactorizationMachine
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/px/cpxobq2mr6dwawssy2tzfy7zsnltl2dyiotjfv7ozz5mhnqyjld6.py # Topologically Sorted Source Nodes: [pow_2, sm_sq], Original ATen: [aten.pow, aten.view] # Source node to ATen node mapping: # pow_2 => pow_2 # sm_sq => view_2 # Graph fragment: # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %view_2 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%pow_2, [64, 4]), kwargs = {}) triton_poi_fused_pow_view_0 = async_compile.triton('triton_poi_fused_pow_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_view_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pow_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 * tmp0 
tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/px/cpxlr4eh5762mt5j36ynozob4f5u2kicyyrhzz4hmhjmadey4uzx.py # Topologically Sorted Source Nodes: [pow_3], Original ATen: [aten.pow] # Source node to ATen node mapping: # pow_3 => pow_3 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) triton_poi_fused_pow_1 = async_compile.triton('triton_poi_fused_pow_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 100 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/g2/cg2efd4fah6c4eghzot45cty64h5tn64b2v2nvz5o5b66oxxulhb.py # Topologically Sorted Source Nodes: [sq_sm, add, mul, add_1, latent], Original ATen: [aten.pow, aten.add, aten.mul, aten.sub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # latent => sub # mul => mul # sq_sm => pow_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_4, %view_5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %view_3), kwargs = {}) triton_poi_fused_add_mul_pow_sub_2 = async_compile.triton('triton_poi_fused_add_mul_pow_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 25) x2 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x2), xmask) tmp9 = tl.load(in_out_ptr0 + (x2), xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp4 * tmp4 tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ok/cokpm3wzabxwznxpiptyoofkdtnhuf2tthhdwfcnoxuua4mcvftb.py # Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs, aten.sgn] # Source node to ATen node mapping: # abs_1 => abs_1 # Graph fragment: # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select_1,), kwargs = {}) # %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%select_1,), kwargs = {}) triton_poi_fused_abs_sgn_3 = async_compile.triton('triton_poi_fused_abs_sgn_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_sgn_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_sgn_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + (32*x1)), xmask) tmp1 = tl_math.abs(tmp0) tmp2 = tl.full([1], 0, tl.int32) tmp3 = tmp2 < tmp0 tmp4 = tmp3.to(tl.int8) tmp5 = tmp0 < tmp2 tmp6 = tmp5.to(tl.int8) tmp7 = tmp4 - tmp6 tmp8 = tmp7.to(tmp0.dtype) tl.store(out_ptr0 + (x2), tmp1, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 25), (25, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (2, 25), (25, 1)) assert_size_stride(primals_6, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 25), (25, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_2, sm_sq], Original ATen: [aten.pow, aten.view] stream0 = get_raw_stream(0) triton_poi_fused_pow_view_0.run(primals_2, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((4, 25), (25, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_3], Original ATen: [aten.pow] triton_poi_fused_pow_1.run(primals_1, buf2, 100, grid=grid(100), stream=stream0) buf3 = empty_strided_cuda((64, 25), (25, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_3, sm_sq], Original ATen: [aten.pow, aten.mm] extern_kernels.mm(buf1, buf2, out=buf3) del buf2 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [lin_reg], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf4) del primals_3 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 25), (400, 100, 25, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [sq_sm, add, mul, add_1, latent], Original ATen: [aten.pow, aten.add, aten.mul, aten.sub] triton_poi_fused_add_mul_pow_sub_2.run(buf5, primals_4, buf4, buf0, 1600, grid=grid(1600), stream=stream0) del buf4 del primals_4 buf6 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf5, (64, 25), (25, 1), 0), reinterpret_tensor(primals_5, (25, 2), (1, 25), 0), alpha=1, beta=1, out=buf6) del primals_6 buf7 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs, aten.sgn] triton_poi_fused_abs_sgn_3.run(buf6, buf7, buf8, 32, grid=grid(32), stream=stream0) return (reinterpret_tensor(buf6, (4, 4, 2), (32, 2, 1), 0), buf7, primals_1, buf0, reinterpret_tensor(buf5, (64, 25), (25, 1), 0), buf8, primals_5, 
reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 25), (25, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, 25), (25, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
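All of the generated pointwise kernels in this entry (e.g. triton_poi_fused_pow_view_0) use the same masked-block addressing pattern: a per-program offset from tl.program_id, a tl.arange lane index, and a bounds mask guarding the ragged last block. A standalone sketch of that pattern with a hypothetical squaring kernel (assumes a CUDA device and an installed triton):

import torch
import triton
import triton.language as tl

@triton.jit
def square_kernel(in_ptr, out_ptr, n_elements, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n_elements               # guard out-of-range lanes
    x = tl.load(in_ptr + offs, mask=mask)
    tl.store(out_ptr + offs, x * x, mask=mask)

x = torch.rand(100, device='cuda')
y = torch.empty_like(x)
square_kernel[(triton.cdiv(100, 128),)](x, y, 100, BLOCK=128)
assert torch.allclose(y, x * x)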
from torch.nn import Module import math import torch import numpy as np from torch.nn import * from torch.optim import AdamW from typing import Union class FactorizationMachine(Module): """ [Factorization Machine Recommendation Model] Learns latent space features to characterize similarity of dataset features to compute a recommendation as a function of dataset features. Dataset features can be mixed / hybrid such that you can combine information on both the recommended object and the recommendation target to generate an informed similarity or recommendation / ranking metric. """ def __init__(self, data_dim, hidden_dim=25, seed=None) ->None: """ Instantiate class attributes for FM. Constructs a feature similarity matrix F of shape (x_features, hidden_dim) to learn implicit representations of all trainable features in the data for recommendation or ranking. :param data_dim <int>: Number of features to learn from in the dataset. :param hidden_dim <int>: Dimension of the latent space of features. :param seed <int>: Random seed fixture for reproducibility. """ super().__init__() self.input_dim = data_dim self.hidden_dim = hidden_dim self.torch_gen = None if seed is not None: self.torch_gen = torch.manual_seed(seed) """ Matrix Factorization """ self.F = Parameter(torch.empty((self.input_dim, self.hidden_dim)), requires_grad=True) init.xavier_uniform_(self.F) """ Linear Regression """ self.V = Parameter(torch.empty((self.input_dim, 1)), requires_grad=True ) init.xavier_uniform_(self.V) self.bias = Parameter(torch.zeros(1), requires_grad=True) """ Gaussian Regression """ self.gaussian_dist = Linear(in_features=self.hidden_dim, out_features=2 ) def forward(self, x: 'torch.Tensor'): """ Compute FactorizationMachine(x). Returns a mean and standard deviation for the recommendation. :param x <torch.Tensor>: Factorization machine input Tensor of shape (N, input_dim). """ sq_sm = torch.matmul(x, self.F) ** 2 sm_sq = torch.matmul(x ** 2, self.F ** 2) lin_reg = torch.matmul(x, self.V) latent = self.bias + lin_reg + 0.5 * sq_sm - sm_sq output = self.gaussian_dist(latent) return output[:, 0], torch.abs(output[:, 1]) def fit(self, X: 'Union[torch.Tensor, np.ndarray]', Y: 'Union[torch.Tensor, np.ndarray]', mask: 'Union[torch.Tensor, np.ndarray]'=None, cycles=100, lr=0.002, batch_frac=0.01, regularize=0.01, patience=3, verbose=False): """ Train the Factorization Machine. :param X <torch.Tensor>: Input training data features of shape (N, X). :param Y <torch.Tensor>: Target training data class / score vector of shape (N, 1). :param mask <torch.Tensor>: Feature observability mask for X of shape (N, X). :param cycles <int>: Number of gradient descent cycles. :param lr <float>: Learning rate. Re-calibrated to order of values in matrix M. :param batch_frac <float>: Fraction of the dataset to set as the batch size. :param regularize <float>: Weight decay lambda for regularization in AdamW. :param patience <int>: Number of cycles of convergence before termination. :param verbose <bool>: Output training progress information. 
""" if any([len(X.shape) != 2, len(Y.shape) != 2, mask is not None and mask.shape != X.shape, X.shape[1] != self.input_dim, Y.shape[1] != 1, cycles <= 0, lr <= 0, batch_frac <= 0, regularize < 0]): None return N = X.shape[0] if not torch.is_tensor(X): X = torch.Tensor(X) if not torch.is_tensor(Y): Y = torch.Tensor(Y) mask_tensor = torch.ones(X.shape) if mask is not None: mask_tensor = torch.where(torch.Tensor(mask) != 0, 1, 0) optimizer = AdamW(self.parameters(), lr=lr, weight_decay=regularize) model_opt = dict(self.state_dict()) loss_opt = float('inf') timer = 0 for i in range(cycles): for _ in range(math.ceil(1 / batch_frac)): rand_idx = torch.randint(N, size=(math.ceil(batch_frac * N) ,), generator=self.torch_gen) X_batch = X[rand_idx] Y_batch = Y[rand_idx] mask_batch = mask_tensor[rand_idx] self.zero_grad() Y_mu, Y_sigma = self(X_batch * mask_batch) loss = GaussianNLLLoss()(Y_mu, Y_batch, Y_sigma) loss.sum().backward() optimizer.step() if i % math.ceil(cycles / 5) == 0 and verbose: None if loss.sum().item() < loss_opt: model_opt = dict(self.state_dict()) loss_opt = loss.sum().item() timer = 0 else: timer += 1 if timer > patience: self.load_state_dict(model_opt) break def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'data_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import math import numpy as np from torch.nn import * from torch.optim import AdamW from typing import Union assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pow_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 100 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_mul_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 25 x2 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp4 * tmp4 tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_abs_sgn_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + 32 * x1), xmask) tmp1 = tl_math.abs(tmp0) tmp2 = tl.full([1], 0, tl.int32) tmp3 = tmp2 < tmp0 tmp4 = tmp3.to(tl.int8) tmp5 = tmp0 < tmp2 tmp6 = tmp5.to(tl.int8) tmp7 = tmp4 - tmp6 tmp8 = tmp7.to(tmp0.dtype) tl.store(out_ptr0 + x2, tmp1, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 25), (25, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (2, 25), (25, 1)) assert_size_stride(primals_6, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 25), (25, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_view_0[grid(256)](primals_2, buf1, 256, XBLOCK =128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 25), (25, 1), torch.float32) triton_poi_fused_pow_1[grid(100)](primals_1, buf2, 100, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 25), (25, 1), torch.float32) extern_kernels.mm(buf1, buf2, out=buf3) del buf2 buf4 = empty_strided_cuda((64, 1), (1, 1), 
torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf4) del primals_3 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 25), (400, 100, 25, 1), 0) del buf3 triton_poi_fused_add_mul_pow_sub_2[grid(1600)](buf5, primals_4, buf4, buf0, 1600, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_4 buf6 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf5, (64, 25), (25, 1), 0), reinterpret_tensor(primals_5, (25, 2), (1, 25), 0), alpha=1, beta=1, out=buf6) del primals_6 buf7 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused_abs_sgn_3[grid(32)](buf6, buf7, buf8, 32, XBLOCK= 32, num_warps=1, num_stages=1) return reinterpret_tensor(buf6, (4, 4, 2), (32, 2, 1), 0 ), buf7, primals_1, buf0, reinterpret_tensor(buf5, (64, 25), (25, 1), 0 ), buf8, primals_5, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0 ), reinterpret_tensor(buf1, (4, 64), (1, 4), 0) class FactorizationMachineNew(Module): """ [Factorization Machine Recommendation Model] Learns latent space features to characterize similarity of dataset features to compute a recommendation as a function of dataset features. Dataset features can be mixed / hybrid such that you can combine information on both the recommended object and the recommendation target to generate an informed similarity or recommendation / ranking metric. """ def __init__(self, data_dim, hidden_dim=25, seed=None) ->None: """ Instantiate class attributes for FM. Constructs a feature similarity matrix F of shape (x_features, hidden_dim) to learn implicit representations of all trainable features in the data for recommendation or ranking. :param data_dim <int>: Number of features to learn from in the dataset. :param hidden_dim <int>: Dimension of the latent space of features. :param seed <int>: Random seed fixture for reproducibility. """ super().__init__() self.input_dim = data_dim self.hidden_dim = hidden_dim self.torch_gen = None if seed is not None: self.torch_gen = torch.manual_seed(seed) """ Matrix Factorization """ self.F = Parameter(torch.empty((self.input_dim, self.hidden_dim)), requires_grad=True) init.xavier_uniform_(self.F) """ Linear Regression """ self.V = Parameter(torch.empty((self.input_dim, 1)), requires_grad=True ) init.xavier_uniform_(self.V) self.bias = Parameter(torch.zeros(1), requires_grad=True) """ Gaussian Regression """ self.gaussian_dist = Linear(in_features=self.hidden_dim, out_features=2 ) def fit(self, X: 'Union[torch.Tensor, np.ndarray]', Y: 'Union[torch.Tensor, np.ndarray]', mask: 'Union[torch.Tensor, np.ndarray]'=None, cycles=100, lr=0.002, batch_frac=0.01, regularize=0.01, patience=3, verbose=False): """ Train the Factorization Machine. :param X <torch.Tensor>: Input training data features of shape (N, X). :param Y <torch.Tensor>: Target training data class / score vector of shape (N, 1). :param mask <torch.Tensor>: Feature observability mask for X of shape (N, X). :param cycles <int>: Number of gradient descent cycles. :param lr <float>: Learning rate. Re-calibrated to order of values in matrix M. :param batch_frac <float>: Fraction of the dataset to set as the batch size. :param regularize <float>: Weight decay lambda for regularization in AdamW. :param patience <int>: Number of cycles of convergence before termination. :param verbose <bool>: Output training progress information. 
""" if any([len(X.shape) != 2, len(Y.shape) != 2, mask is not None and mask.shape != X.shape, X.shape[1] != self.input_dim, Y.shape[1] != 1, cycles <= 0, lr <= 0, batch_frac <= 0, regularize < 0]): None return N = X.shape[0] if not torch.is_tensor(X): X = torch.Tensor(X) if not torch.is_tensor(Y): Y = torch.Tensor(Y) mask_tensor = torch.ones(X.shape) if mask is not None: mask_tensor = torch.where(torch.Tensor(mask) != 0, 1, 0) optimizer = AdamW(self.parameters(), lr=lr, weight_decay=regularize) model_opt = dict(self.state_dict()) loss_opt = float('inf') timer = 0 for i in range(cycles): for _ in range(math.ceil(1 / batch_frac)): rand_idx = torch.randint(N, size=(math.ceil(batch_frac * N) ,), generator=self.torch_gen) X_batch = X[rand_idx] Y_batch = Y[rand_idx] mask_batch = mask_tensor[rand_idx] self.zero_grad() Y_mu, Y_sigma = self(X_batch * mask_batch) loss = GaussianNLLLoss()(Y_mu, Y_batch, Y_sigma) loss.sum().backward() optimizer.step() if i % math.ceil(cycles / 5) == 0 and verbose: None if loss.sum().item() < loss_opt: model_opt = dict(self.state_dict()) loss_opt = loss.sum().item() timer = 0 else: timer += 1 if timer > patience: self.load_state_dict(model_opt) break def forward(self, input_0): primals_1 = self.F primals_3 = self.V primals_4 = self.bias primals_5 = self.gaussian_dist.weight primals_6 = self.gaussian_dist.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
cspades/algorithm-toolkit
FactorizationMachine
false
12257
[ "Apache-2.0" ]
0
8731112162fb60f8ef3ab3c38524456ae96f0c2d
https://github.com/cspades/algorithm-toolkit/tree/8731112162fb60f8ef3ab3c38524456ae96f0c2d
C2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/35/c35uoqeuk5xgqj55u5y27rwspn52a5jvnsivhhcescwbt7rm6rnh.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 492032 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3844) % 32 x0 = xindex % 3844 x4 = (xindex // 3844) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3872*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rc/crcvs7gf5dm7znait3krgjw6vt36tx62fnvewm7rx34jlc5n5fqv.py # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_3 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = (xindex // 31) % 31 x2 = (xindex // 961) x5 = xindex x4 = (xindex // 30752) x6 = xindex % 30752 tmp0 = tl.load(in_ptr0 + ((2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = 
tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5), tmp6, xmask) tl.store(out_ptr1 + (x6 + (30848*x4)), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 32, 62, 62), (123904, 3872, 62, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 492032, grid=grid(492032), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1), torch.int8) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 123008, grid=grid(123008), stream=stream0) return (buf2, primals_1, primals_3, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 16, 64, 64), (65536, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
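Editor's note on the buffer layouts above (my annotation, not generated output): the ReLU output is allocated with a per-channel stride of 3872 rather than the dense 62 * 62 = 3844, presumably so each channel starts on a 128-byte boundary (3872 is the next multiple of 32 float32 elements), and the pooling kernel's 3872 * x2 addressing assumes that padded layout. A minimal sketch of the rounding:

# Illustrative only: how 3844 rounds up to the padded stride 3872.
def round_up(n, multiple):
    return ((n + multiple - 1) // multiple) * multiple

assert round_up(62 * 62, 32) == 3872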
import torch import torch.nn as nn from collections import OrderedDict class C2(nn.Module): def __init__(self) ->None: super(C2, self).__init__() self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(16, 32, kernel_size=(3, 3), bias=True)), ('relu2', nn.ReLU()), ('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2))])) def forward(self, img): output = self.c2(img) return output def get_inputs(): return [torch.rand([4, 16, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 492032 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 32 x0 = xindex % 3844 x4 = xindex // 3844 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = xindex // 31 % 31 x2 = xindex // 961 x5 = xindex x4 = xindex // 30752 x6 = xindex % 30752 tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 30848 * x4), tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 32, 62, 62), (123904, 3872, 62, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(492032)](buf0, primals_2, buf1, 492032, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(123008)](buf1, buf2, buf3, 123008, XBLOCK=512, num_warps=8, num_stages=1) return buf2, primals_1, primals_3, buf1, buf3 class C2New(nn.Module): def __init__(self) ->None: super(C2New, self).__init__() self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(16, 32, kernel_size=(3, 3), 
bias=True)), ('relu2', nn.ReLU()), ('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2))])) def forward(self, input_0): primals_1 = self.c2.c2.weight primals_2 = self.c2.c2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
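For readers comparing the two classes above, a small parity check makes a useful smoke test. This is a hypothetical sketch, not part of the repo: it assumes a CUDA device (call() pins everything to cuda:0) and that C2 and C2New are importable in the same module.

import torch

eager = C2().cuda().eval()
compiled = C2New().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # share the conv weight/bias

x = torch.rand(4, 16, 64, 64, device='cuda')
with torch.no_grad():
    ref = eager(x)
    out = compiled(x)
torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)
print(out.shape)  # torch.Size([4, 32, 31, 31]): 3x3 valid conv, then 2x2 pool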
devillove084/DeepSignal
C2
false
12,258
[ "MIT" ]
0
1fe122b32752b11e10ca4bef3d07ddd7de4348b5
https://github.com/devillove084/DeepSignal/tree/1fe122b32752b11e10ca4bef3d07ddd7de4348b5
LinearWithGroupNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ke/ckev4oib3cv2oksvgsnixm2ir66mykxwkb2phwtnkc7jitdkle52.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => add, add_1, mul_1, rsqrt, var_mean # out_2 => relu # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_per_fused_native_group_norm_relu_threshold_backward_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused_native_group_norm_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp30 = 0.0 tmp31 = tmp29 <= tmp30 tl.store(out_ptr2 + (r1 + (16*x0)), tmp29, xmask) tl.store(out_ptr3 + (r1 + (16*x0)), tmp31, xmask) tl.store(out_ptr4 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_per_fused_native_group_norm_relu_threshold_backward_0.run(buf0, primals_3, primals_4, buf1, buf5, buf6, buf4, 16, 16, grid=grid(16), stream=stream0) del primals_4 return (buf5, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), 
buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data from torch import nn from math import gcd import torch.cuda class LinearWithGroupNorm(nn.Module): def __init__(self, n_in: 'int', n_out: 'int', num_groups: 'int'=32, activation: 'bool'=True) ->None: """ Linear layer used in LaneGCN. :param n_in: Number of input channels. :param n_out: Number of output channels. :param num_groups: Number of groups for GroupNorm. :param activation: Boolean indicating whether to apply ReLU activation. """ super().__init__() self.linear = nn.Linear(n_in, n_out, bias=False) self.norm = nn.GroupNorm(gcd(num_groups, n_out), n_out) self.relu = nn.ReLU(inplace=True) self.activation = activation def forward(self, x: 'torch.Tensor') ->torch.Tensor: """ Apply linear layer to input tensor. :param x: Input tensor. :return: Output of linear layer. """ out = self.linear(x) out = self.norm(out) if self.activation: out = self.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_in': 4, 'n_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data from torch import nn from math import gcd import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp30 = 0.0 tmp31 = tmp29 <= tmp30 tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask) tl.store(out_ptr3 + (r1 + 16 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_per_fused_native_group_norm_relu_threshold_backward_0[grid(16)]( buf0, primals_3, primals_4, buf1, buf5, buf6, buf4, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_4 return buf5, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf6 class LinearWithGroupNormNew(nn.Module): def __init__(self, n_in: 'int', n_out: 'int', num_groups: 'int'=32, activation: 'bool'=True) ->None: """ Linear layer used in LaneGCN. 
:param n_in: Number of input channels. :param n_out: Number of output channels. :param num_groups: Number of groups for GroupNorm. :param activation: Boolean indicating whether to apply ReLU activation. """ super().__init__() self.linear = nn.Linear(n_in, n_out, bias=False) self.norm = nn.GroupNorm(gcd(num_groups, n_out), n_out) self.relu = nn.ReLU(inplace=True) self.activation = activation def forward(self, input_0): primals_1 = self.linear.weight primals_3 = self.norm.weight primals_4 = self.norm.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
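One detail worth calling out in the module above: gcd(num_groups, n_out) always yields a group count that divides the channel count, which GroupNorm requires. With the test shape here (n_out=4, default num_groups=32) that gives 4 groups of one channel each, which is why the fused kernel reduces over 16 elements per (sample, group) cell. A quick illustration:

# Illustration of how gcd picks a valid group count for GroupNorm.
from math import gcd

for n_out in (4, 24, 48, 64):
    groups = gcd(32, n_out)        # default num_groups = 32
    assert n_out % groups == 0     # the divisibility GroupNorm demands
    print(f'n_out={n_out}: {groups} groups, {n_out // groups} channels each')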
bradyz/nuplan-devkit
LinearWithGroupNorm
false
12,259
[ "Apache-2.0" ]
0
0a7a30e5d7fdf3787d9388676b7856fbd7d92992
https://github.com/bradyz/nuplan-devkit/tree/0a7a30e5d7fdf3787d9388676b7856fbd7d92992
C3
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/jo/cjoemxekvdm7olpycabvj2rgijrz7ronctrtfyp2ek5aifdsrxev.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': 
False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 984064 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = (xindex // 3844) % 64 x0 = xindex % 3844 x3 = (xindex // 3844) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x0 + (3968*x3)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 62, 62), (246016, 3844, 62, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 64, 62, 62), (253952, 3968, 62, 1), torch.bool) # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 984064, grid=grid(984064), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 32, 64, 64), (131072, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
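Two things distinguish this kernel from the C2 variant above: it rewrites the convolution output in place (buf1 reuses buf0 via in_out_ptr0), and it also emits a boolean buffer holding relu(x) <= 0, the mask that threshold_backward later uses to zero gradients. A tiny illustration of that mask semantics (my sketch, not generated code):

import torch

x = torch.tensor([-1.0, 0.0, 2.0])
y = torch.relu(x)
mask = y <= 0   # True where the backward pass must zero the gradient
print(mask)     # tensor([ True,  True, False])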
import torch
import torch.nn as nn
from collections import OrderedDict


class C3(nn.Module):

    def __init__(self):
        super(C3, self).__init__()
        self.c3 = nn.Sequential(OrderedDict([('c3', nn.Conv2d(32, 64,
            kernel_size=(3, 3), bias=True)), ('relu3', nn.ReLU())]))

    def forward(self, img):
        output = self.c3(img)
        return output


def get_inputs():
    return [torch.rand([4, 32, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 984064
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x1 = xindex // 3844 % 64
    x0 = xindex % 3844
    x3 = xindex // 3844
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr0 + (x0 + 3968 * x3), tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 64, 62, 62), (246016, 3844, 62, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 64, 62, 62), (253952, 3968, 62, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_threshold_backward_0[grid(984064)](
            buf1, primals_2, buf2, 984064, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3, buf2


class C3New(nn.Module):

    def __init__(self):
        super(C3New, self).__init__()
        self.c3 = nn.Sequential(OrderedDict([('c3', nn.Conv2d(32, 64,
            kernel_size=(3, 3), bias=True)), ('relu3', nn.ReLU())]))

    def forward(self, input_0):
        primals_1 = self.c3.c3.weight
        primals_2 = self.c3.c3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
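As a sanity check on the sizes hard-coded into the kernel (xnumel = 984064): a valid 3x3 convolution maps 64x64 to 62x62. A hypothetical verification:

import torch
import torch.nn as nn

conv = nn.Conv2d(32, 64, kernel_size=(3, 3), bias=True)
y = conv(torch.rand(4, 32, 64, 64))
assert y.shape == (4, 64, 62, 62)
assert y.numel() == 984064  # matches the kernel's hard-coded xnumel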
devillove084/DeepSignal
C3
false
12,260
[ "MIT" ]
0
1fe122b32752b11e10ca4bef3d07ddd7de4348b5
https://github.com/devillove084/DeepSignal/tree/1fe122b32752b11e10ca4bef3d07ddd7de4348b5
L2Norm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), 
xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class L2Norm(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x):
        assert x.dim() == 2, 'the input tensor of L2Norm must have shape [B, C]'
        return F.normalize(x, p=2, dim=-1)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class L2NormNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
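The generated kernel bakes in F.normalize's behaviour for the specialized [4, 4] shape, including the max(||row||, 1e-12) clamp that guards against division by zero. A hedged parity sketch (assumes a CUDA device and that L2NormNew is importable):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, device='cuda')
ref = F.normalize(x, p=2, dim=-1)   # divides by clamp(norm, min=1e-12)
out = L2NormNew()(x)
torch.testing.assert_close(out, ref)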
deokhk/Proxy-Anchor-CVPR2020
L2Norm
false
12,261
[ "MIT" ]
0
acb3a16c3ebc8b8777542898ec83de32aa8ba64e
https://github.com/deokhk/Proxy-Anchor-CVPR2020/tree/acb3a16c3ebc8b8777542898ec83de32aa8ba64e
MaskedMSE
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/lc/clcjwavbfgr6c56ur4yy3tcfbja6veotp77jfx2d7wive37vnxp4.py # Topologically Sorted Source Nodes: [mul, add, mask, mul_1, mul_2, mse_loss], Original ATen: [aten.mul, aten.add, aten.div, aten.mse_loss] # Source node to ATen node mapping: # add => add # mask => div # mse_loss => mean, pow_1, sub # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-07), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %div), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %div), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) triton_per_fused_add_div_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused_add_div_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp4 = 1e-07 tmp5 = tmp1 + tmp4 tmp6 = tmp3 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tmp1 * tmp6 tmp9 = tmp7 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp15, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, add, mask, mul_1, mul_2, mse_loss], Original ATen: [aten.mul, aten.add, aten.div, aten.mse_loss] stream0 = get_raw_stream(0) triton_per_fused_add_div_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class MaskedMSE(nn.Module): def __init__(self): super(MaskedMSE, self).__init__() self.criterion = nn.MSELoss() def forward(self, input, target, gamma=2.0): mask = gamma * target / (target + 1e-07) self.loss = self.criterion(input * mask, target * mask) return self.loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp4 = 1e-07 tmp5 = tmp1 + tmp4 tmp6 = tmp3 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tmp1 * tmp6 tmp9 = tmp7 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MaskedMSENew(nn.Module): def __init__(self): super(MaskedMSENew, self).__init__() self.criterion = nn.MSELoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
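The mask gamma * target / (target + 1e-07) is worth unpacking: it is exactly 0 where target == 0 and approaches gamma wherever target >> 1e-07, so the MSE effectively ignores zero targets and scales the remaining squared errors by roughly gamma**2. A small numeric illustration (my sketch, not from the repo):

import torch

t = torch.tensor([0.0, 1e-07, 0.5, 1.0])
mask = 2.0 * t / (t + 1e-07)   # gamma = 2.0, the default
print(mask)  # approximately tensor([0.0000, 1.0000, 2.0000, 2.0000])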
dhruvramani/MaskedMSE
MaskedMSE
false
12,262
[ "MIT" ]
0
76ff94add5659217a3f4f21e60a4f069defede29
https://github.com/dhruvramani/MaskedMSE/tree/76ff94add5659217a3f4f21e60a4f069defede29
C1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/mx/cmxbucu53niw4mhkko67o2ijqxfkqksx6yvmymsdhrqavytlfa2x.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 246016 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3844) % 16 x0 = xindex % 3844 x4 = (xindex // 3844) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3872*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/an/canlerjerxsnzuoc7eptsqzgtybde4h5qxhdtkr623x3x6ke7ke4.py # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_3 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 61504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = (xindex // 31) % 31 x2 = (xindex // 961) x5 = xindex x4 = (xindex // 15376) x6 = xindex % 15376 tmp0 = tl.load(in_ptr0 + ((2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = 
tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5), tmp6, xmask) tl.store(out_ptr1 + (x6 + (15488*x4)), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 62, 62), (61504, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 16, 62, 62), (61952, 3872, 62, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 246016, grid=grid(246016), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 16, 31, 31), (15376, 961, 31, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 31, 31), (15488, 961, 31, 1), torch.int8) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 61504, grid=grid(61504), stream=stream0) return (buf2, primals_1, primals_3, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from collections import OrderedDict class C1(nn.Module): def __init__(self) ->None: super(C1, self).__init__() self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(3, 16, kernel_size=(3, 3), bias=True)), ('relu1', nn.ReLU()), ('s1', nn.MaxPool2d(kernel_size=(2, 2), stride=2))])) def forward(self, img): output = self.c1(img) return output def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
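A minimal parity-check sketch for this record, assuming a CUDA device and that C1 and C1New from the fields above are importable in one scope:

import torch

def check_c1_parity():
    torch.manual_seed(0)
    eager = C1().cuda().eval()
    compiled = C1New().cuda().eval()
    compiled.load_state_dict(eager.state_dict())  # share conv weight/bias
    x = torch.rand(4, 3, 64, 64, device='cuda')
    with torch.no_grad():
        ref, out = eager(x), compiled(x)
    assert ref.shape == out.shape == (4, 16, 31, 31)
    assert torch.allclose(ref, out, rtol=1e-5, atol=1e-5)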
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 246016 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 16 x0 = xindex % 3844 x4 = xindex // 3844 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 61504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = xindex // 31 % 31 x2 = xindex // 961 x5 = xindex x4 = xindex // 15376 x6 = xindex % 15376 tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 15488 * x4), tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 62, 62), (61504, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 16, 62, 62), (61952, 3872, 62, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(246016)](buf0, primals_2, buf1, 246016, XBLOCK=512, num_warps=8, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 16, 31, 31), (15376, 961, 31, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 31, 31), (15488, 961, 31, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(61504)](buf1, buf2, buf3, 61504, XBLOCK=512, num_warps=4, num_stages=1) return buf2, primals_1, primals_3, buf1, buf3 class C1New(nn.Module): def __init__(self) ->None: super(C1New, self).__init__() self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(3, 16, kernel_size=(3, 3), bias=True)), 
('relu1', nn.ReLU()), ('s1', nn.MaxPool2d(kernel_size=(2, 2), stride=2))])) def forward(self, input_0): primals_1 = self.c1.c1.weight primals_2 = self.c1.c1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
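A note on the non-contiguous (61952, 3872, 62, 1) strides above: the channel stride 3872 is consistent with each 62x62 float32 plane being padded up to a 128-byte multiple. That alignment heuristic is an assumption on our part, not something stated in the record, but the arithmetic checks out for both output buffers:

plane_bytes = 62 * 62 * 4                    # one float32 channel plane: 15376 bytes
padded_bytes = -(-plane_bytes // 128) * 128  # round up to a 128-byte multiple
assert padded_bytes == 15488 and padded_bytes // 4 == 3872
# The int8 index buffer shows the same rounding: 31*31*16 = 15376 bytes -> stride 15488.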
devillove084/DeepSignal
C1
false
12263
[ "MIT" ]
0
1fe122b32752b11e10ca4bef3d07ddd7de4348b5
https://github.com/devillove084/DeepSignal/tree/1fe122b32752b11e10ca4bef3d07ddd7de4348b5
ContractingBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # pad => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xd/cxdkgesb6qc4hbcvbeerj4gwfolzo6uujsc7nr7gmmdljjywjmd6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/4g/c4geyyoagdnztlnrty3v4xfvea35mbe6vwo37gem3ego3h2jcfjn.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit] # Source node to ATen node mapping: # x_1 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused__native_batch_norm_legit_2 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_2', ''' import triton import triton.language 
as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__native_batch_norm_legit_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/om/comqbvfu6kzena7574syetkijeyqi4wev6lku6a222aldfprestn.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_2 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_3, buf0, 576, grid=grid(576), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 2, 2), (32, 4, 2, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_2, 128, grid=grid(128), stream=stream0) del primals_2 buf3 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) buf4 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit] triton_poi_fused__native_batch_norm_legit_2.run(buf2, buf3, buf4, 32, grid=grid(32), stream=stream0) buf5 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32) buf6 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf2, buf3, buf4, buf5, buf6, 128, grid=grid(128), stream=stream0) del buf3 del buf4 return (buf5, primals_1, buf0, buf2, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from 
torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class ContractingBlock(nn.Module): def __init__(self, input_channels, use_bn=True, kernel_size=3, activation='relu'): super(ContractingBlock, self).__init__() self.conv1 = nn.Conv2d(input_channels, input_channels * 2, kernel_size=kernel_size, padding=1, stride=2, padding_mode= 'reflect') self.activation = nn.ReLU() if activation == 'relu' else nn.LeakyReLU( 0.2) if use_bn: self.instancenorm = nn.InstanceNorm2d(input_channels * 2) self.use_bn = use_bn def forward(self, x): x = self.conv1(x) if self.use_bn: x = self.instancenorm(x) x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4}]
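A small eager sketch (helper name ours) of what triton_poi_fused__native_batch_norm_legit_2 in this record computes for the (4, 8, 2, 2) activation:

import torch

def instance_stats_reference(x, eps=1e-5):
    # One mean and one rsqrt(var + eps) per (batch, channel) plane, with
    # correction=0, matching the buf3/buf4 outputs of the fused kernel.
    n, c, h, w = x.shape
    flat = x.reshape(n * c, h * w)
    return flat.mean(dim=1), torch.rsqrt(flat.var(dim=1, unbiased=False) + eps)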
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_3, 
buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 2, 2), (32, 4, 2, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(128)](buf2, primals_2, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32 ) buf4 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32 ) triton_poi_fused__native_batch_norm_legit_2[grid(32)](buf2, buf3, buf4, 32, XBLOCK=32, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32) buf6 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(128)](buf2, buf3, buf4, buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del buf4 return buf5, primals_1, buf0, buf2, buf6 class ContractingBlockNew(nn.Module): def __init__(self, input_channels, use_bn=True, kernel_size=3, activation='relu'): super(ContractingBlockNew, self).__init__() self.conv1 = nn.Conv2d(input_channels, input_channels * 2, kernel_size=kernel_size, padding=1, stride=2, padding_mode= 'reflect') self.activation = nn.ReLU() if activation == 'relu' else nn.LeakyReLU( 0.2) if use_bn: self.instancenorm = nn.InstanceNorm2d(input_channels * 2) self.use_bn = use_bn def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
diegushko/CycleGAN
ContractingBlock
false
12264
[ "MIT" ]
0
630d1cd00cef3f09f036d3c734d31c772cc0a786
https://github.com/diegushko/CycleGAN/tree/630d1cd00cef3f09f036d3c734d31c772cc0a786
h_swish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/4n/c4n4t4ob46tjofubvbx7kuypv4fkq77j7kjunchukdfb6wpttdx6.py # Topologically Sorted Source Nodes: [add, hardtanh, truediv, mul], Original ATen: [aten.add, aten.hardtanh, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # hardtanh => clamp_max, clamp_min # mul => mul # truediv => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %div), kwargs = {}) triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tmp9 = tmp0 * tmp8 tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, hardtanh, truediv, mul], Original ATen: [aten.add, aten.hardtanh, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class h_sigmoid(nn.Module): def __init__(self, inplace=True): super(h_sigmoid, self).__init__() self.relu = nn.ReLU6(inplace=inplace) def forward(self, x): return self.relu(x + 3) / 6 class h_swish(nn.Module): def __init__(self, inplace=True): super(h_swish, self).__init__() self.sigmoid = h_sigmoid(inplace=inplace) def forward(self, x): return x * self.sigmoid(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
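For reference, the single fused kernel in this record is the hard-swish closed form; a minimal eager sketch (the helper name is ours):

import torch

def hard_swish_reference(x):
    # relu6(x + 3) / 6 folded into a multiply by 1/6, then scaled by x,
    # the same arithmetic as tmp0 * tmp8 in the kernel above.
    return x * torch.clamp(x + 3.0, 0.0, 6.0) * (1.0 / 6.0)

# torch.nn.functional.hardswish(x) computes the same activation in one call.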
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tmp9 = tmp0 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class h_sigmoid(nn.Module): def __init__(self, inplace=True): super(h_sigmoid, self).__init__() self.relu = nn.ReLU6(inplace=inplace) def forward(self, x): return self.relu(x + 3) / 6 class h_swishNew(nn.Module): def __init__(self, inplace=True): super(h_swishNew, self).__init__() self.sigmoid = h_sigmoid(inplace=inplace) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dhananjaisharma10/mmdetection
h_swish
false
12265
[ "Apache-2.0" ]
0
6f6db3211c3760cffe9db2350297c42cc29ce140
https://github.com/dhananjaisharma10/mmdetection/tree/6f6db3211c3760cffe9db2350297c42cc29ce140
Mlp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/6s/c6shmuvjmq6zc4ifvdsynorwri47ra63qxa7jg3e7p6lw6xlqj5q.py # Topologically Sorted Source Nodes: [mul, truediv, erf, add, x_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] # Source node to ATen node mapping: # add => add # erf => erf # mul => mul # truediv => div # x_1 => mul_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_add_div_erf_mul_0 = async_compile.triton('triton_poi_fused_add_div_erf_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, truediv, erf, add, x_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.utils.data import torch import torch.nn as nn def gelu(x): """ Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=gelu, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
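A sketch decoding the fused kernel's constants (not part of the record): 0.7071067811865475 is 1/sqrt(2), so the kernel evaluates the exact erf-based GELU from the docstring above rather than the tanh approximation:

import math
import torch

def gelu_reference(x):
    # tmp8 = 0.5 * x * (1 + erf(x / sqrt(2))) in triton_poi_fused_add_div_erf_mul_0
    return 0.5 * x * (1.0 + torch.erf(x * (1.0 / math.sqrt(2.0))))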
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4 def gelu(x): """ Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class MlpNew(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=gelu, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
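And a sketch of what the two extern_kernels.addmm calls above compute: each is an nn.Linear applied to the (64, 4) flattening of the (4, 4, 4, 4) input (helper name ours):

import torch

def fc_reference(x_flat, weight, bias):
    # bias + x @ weight.T; reinterpret_tensor only changes strides, so the
    # transposed weight view costs no copy in the compiled path.
    return torch.addmm(bias, x_flat, weight.t())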
denisleonov/pytorch-CycleGAN-and-pix2pix
Mlp
false
12266
[ "BSD-3-Clause" ]
0
d1a5f0c5911f70ed896f826619b4067ce737a83d
https://github.com/denisleonov/pytorch-CycleGAN-and-pix2pix/tree/d1a5f0c5911f70ed896f826619b4067ce737a83d
FeatureMapBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/xa/cxarfsc3ndk4awaql3b6pgvht3htyheatjaogwixz573s4x5mlpy.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # pad => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = (xindex // 10) % 10 x2 = (xindex // 100) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-3) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-3) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 7, 7), (196, 49, 7, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_3, buf0, 1600, grid=grid(1600), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf2, primals_1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 7, 7), (196, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class FeatureMapBlock(nn.Module): def __init__(self, input_channels, output_channels): super(FeatureMapBlock, self).__init__() self.conv = nn.Conv2d(input_channels, output_channels, kernel_size= 7, padding=3, padding_mode='reflect') def forward(self, x): x = self.conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4, 'output_channels': 4}]
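An illustrative helper (assuming pad=3 and a 4x4 map, as in this record) for the reflection indexing that triton_poi_fused_reflection_pad2d_0 bakes into its load offsets:

def reflect_index(i, pad=3, size=4):
    # Output position i maps back to input index (size-1) - |(size-1) - |i - pad||,
    # the nested tl_math.abs pattern in the kernel.
    return (size - 1) - abs((size - 1) - abs(i - pad))

# [reflect_index(i) for i in range(10)] == [3, 2, 1, 0, 1, 2, 3, 2, 1, 0]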
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = xindex // 10 % 10 x2 = xindex // 100 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-3 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-3 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 7, 7), (196, 49, 7, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch. float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(1600)](primals_3, buf0, 1600, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, buf0 class FeatureMapBlockNew(nn.Module): def __init__(self, input_channels, output_channels): super(FeatureMapBlockNew, self).__init__() self.conv = nn.Conv2d(input_channels, output_channels, kernel_size= 7, padding=3, padding_mode='reflect') def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
diegushko/CycleGAN
FeatureMapBlock
false
12267
[ "MIT" ]
0
630d1cd00cef3f09f036d3c734d31c772cc0a786
https://github.com/diegushko/CycleGAN/tree/630d1cd00cef3f09f036d3c734d31c772cc0a786
PrecomputedNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/yi/cyiakmznzllqklcnfmh5hu3smacswbrh4drlxafkndcx6god674a.py # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 4), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 4), kwargs = {}) triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.25 tmp4 = tmp2 * tmp3 
tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class PrecomputedNorm(nn.Module):
    """Normalization using Pre-computed Mean/Std.

    Args:
        stats: Precomputed (mean, std).
        axis: Axis setting used to calculate mean/variance.
    """

    def __init__(self, stats, axis=[1, 2]):
        super().__init__()
        self.axis = axis
        self.mean, self.std = stats

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        return (X - self.mean) / self.std

    def __repr__(self):
        format_string = (self.__class__.__name__ +
            f'(mean={self.mean}, std={self.std}, axis={self.axis})')
        return format_string


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'stats': [4, 4]}]
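A minimal sanity check, added for illustration (not part of the original entry) and assuming the PrecomputedNorm class above is in scope. With stats=[4, 4] the forward pass computes (X - 4) / 4, which is exactly the sub/div pair the generated kernel fuses into the constants 4.0 and 0.25:

import torch

# Illustrative check, assuming PrecomputedNorm from above is in scope.
norm = PrecomputedNorm(stats=[4, 4])   # mean=4, std=4, as in get_init_inputs()
x = torch.rand(4, 4, 4, 4)
expected = (x - 4.0) * 0.25            # same constants the Triton kernel bakes in
assert torch.allclose(norm(x), expected)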
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise (x - mean) / std with mean=4.0 and 1/std=0.25 baked in as
    # compile-time constants (stats=[4, 4] from get_init_inputs()).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 4.0
    tmp2 = tmp0 - tmp1
    tmp3 = 0.25
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # One fused kernel covers the whole forward pass (256 elements).
        triton_poi_fused_div_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class PrecomputedNormNew(nn.Module):
    """Normalization using Pre-computed Mean/Std.

    Args:
        stats: Precomputed (mean, std).
        axis: Axis setting used to calculate mean/variance.
    """

    def __init__(self, stats, axis=[1, 2]):
        super().__init__()
        self.axis = axis
        self.mean, self.std = stats

    def __repr__(self):
        format_string = (self.__class__.__name__ +
            f'(mean={self.mean}, std={self.std}, axis={self.axis})')
        return format_string

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
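On a machine with a CUDA device and a working Triton install, the compiled module can be checked against the eager one. This is an illustrative sketch, assuming both PrecomputedNorm and PrecomputedNormNew from this entry are in scope:

import torch

# Illustrative equivalence check; requires CUDA and Triton at runtime.
if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')   # shape/strides match the assert in call()
    eager = PrecomputedNorm(stats=[4, 4])
    fused = PrecomputedNormNew(stats=[4, 4])
    assert torch.allclose(eager(x), fused(x))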
czlwang/s3prl
PrecomputedNorm
false
12,268
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
AMSoftmaxLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py # Topologically Sorted Source Nodes: [x_BxH], Original ATen: [aten.div] # Source node to ATen node mapping: # x_BxH => div_1 # Graph fragment: # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand_1), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), 
xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xe/cxewggzrfqe57dzglxrzfhfgpsywlh36utvtdulp5oi75wfs7ml3.py # Topologically Sorted Source Nodes: [W], Original ATen: [aten.div] # Source node to ATen node mapping: # W => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %expand), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zq/czqoy42ufye2afnopzmn2loqzazgyudvxxmzemheojq4scu6j5lz.py # Topologically Sorted Source Nodes: [sub, numerator], Original ATen: [aten.sub, aten.mul] # Source node to ATen node mapping: # numerator => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%diagonal, 0.4), kwargs = {}) # %mul : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 30.0), kwargs = {}) triton_poi_fused_mul_sub_2 = async_compile.triton('triton_poi_fused_mul_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (tmp4 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_BxH], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [W], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_3, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [W, wf], Original ATen: [aten.div, aten.mm] extern_kernels.mm(buf0, buf1, out=buf2) del buf1 buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sub, numerator], Original ATen: [aten.sub, aten.mul] triton_poi_fused_mul_sub_2.run(primals_2, buf2, buf3, 4, grid=grid(4), stream=stream0) return (buf3, buf2, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): 
from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class AMSoftmaxLoss(nn.Module):

    def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs):
        """
        AM Softmax Loss
        """
        super(AMSoftmaxLoss, self).__init__()
        self.s = s
        self.m = m
        self.speaker_num = speaker_num
        self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num),
            requires_grad=True)
        nn.init.xavier_normal_(self.W, gain=1)

    def forward(self, x_BxH, labels_B):
        """
        x shape: (B, H)
        labels shape: (B)
        """
        assert len(x_BxH) == len(labels_B)
        assert torch.min(labels_B) >= 0
        assert torch.max(labels_B) < self.speaker_num
        W = F.normalize(self.W, dim=0)
        x_BxH = F.normalize(x_BxH, dim=1)
        wf = torch.mm(x_BxH, W)
        # Margin-adjusted target logit: s * (cos(theta_y) - m).
        numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels_B]) -
            self.m)
        # Non-target logits for each row i: wf[i] with column labels_B[i] removed.
        excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0)
            for i, y in enumerate(labels_B)], dim=0)
        denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s *
            excl), dim=1)
        L = numerator - torch.log(denominator)
        return -torch.mean(L)


def get_inputs():
    return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {'hidden_dim': 4, 'speaker_num': 4}]
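A short usage sketch, added for illustration and assuming AMSoftmaxLoss as defined above is in scope. Note that the compiled wrapper further down returns the fused numerator s * (cos(theta_y) - m) as its first output; the excl/log-sum-exp part does not appear in this extracted graph and stays in eager PyTorch:

import torch

# Illustrative usage; dimensions mirror get_init_inputs()/get_inputs().
torch.manual_seed(0)
criterion = AMSoftmaxLoss(hidden_dim=4, speaker_num=4, s=30.0, m=0.4)
x = torch.rand(4, 4)                  # (B, H) utterance embeddings
labels = torch.randint(0, 4, (4,))    # (B,) speaker ids in [0, speaker_num)
loss = criterion(x, labels)
loss.backward()                       # gradients flow into criterion.W
print(loss.item())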
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_3, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.mm(buf0, buf1, out=buf2) del buf1 buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_sub_2[grid(4)](primals_2, buf2, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) return buf3, buf2, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4 ), (1, 4), 0) class AMSoftmaxLossNew(nn.Module): def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs): """ AM Softmax Loss """ super(AMSoftmaxLossNew, self).__init__() self.s = s self.m = m self.speaker_num = speaker_num self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num), requires_grad=True) nn.init.xavier_normal_(self.W, gain=1) def forward(self, input_0, input_1): primals_1 = self.W primals_3 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
czlwang/s3prl
AMSoftmaxLoss
false
12,269
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
ResidualBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # pad => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/t6/ct6syu6rq3n7yx3zuog2yujcrfreefdccraqz7zj2m3c5xhvp5vl.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit] # Source node to ATen node mapping: # x => convolution # x_1 => add, rsqrt, var_mean # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) 
tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp23, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3h/c3hibnn4u5bibelcq2qplcbyvh7ck6url2gw4ck2dtawqpjjpnpp.py # Topologically Sorted Source Nodes: [x_2, pad_1], Original ATen: [aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # pad_1 => _unsafe_index_2, _unsafe_index_3 # x_2 => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_relu_2 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 
(x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5y/c5yuw24p5eo5tb77i2toneys6cm7y7fkrc7vjo7um2bq3itgdu7q.py # Topologically Sorted Source Nodes: [x_3, x_4, add], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add] # Source node to ATen node mapping: # add => add_2 # x_3 => convolution_1 # x_4 => add_1, rsqrt_1, var_mean_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_3), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_convolution_3 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (r2 + (16*x3)), xmask, other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp20 = tmp2 - tmp12 tmp21 = 16.0 tmp22 = tmp18 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp19 + tmp26 tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.store(out_ptr2 + (r2 + (16*x3)), tmp27, xmask) tl.store(out_ptr3 + (x3), tmp25, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit] triton_per_fused__native_batch_norm_legit_convolution_1.run(buf2, buf6, primals_3, buf3, 16, 16, grid=grid(16), stream=stream0) del primals_3 buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2, pad_1], Original ATen: [aten.relu, aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_relu_2.run(buf2, buf3, buf6, buf7, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = buf8; del buf8 # reuse buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [x_3, x_4, add], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add] triton_per_fused__native_batch_norm_legit_add_convolution_3.run(buf9, primals_5, primals_1, buf10, buf14, buf13, 16, 16, grid=grid(16), 
stream=stream0) del primals_1 del primals_5 return (buf14, primals_2, primals_4, buf0, buf2, buf3, buf6, buf7, buf9, reinterpret_tensor(buf13, (16, ), (1, ), 0), reinterpret_tensor(buf10, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class ResidualBlock(nn.Module):

    def __init__(self, input_channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(input_channels, input_channels,
            kernel_size=3, padding=1, padding_mode='reflect')
        self.conv2 = nn.Conv2d(input_channels, input_channels,
            kernel_size=3, padding=1, padding_mode='reflect')
        self.instancenorm = nn.InstanceNorm2d(input_channels)
        self.activation = nn.ReLU()

    def forward(self, x):
        original_x = x.clone()
        x = self.conv1(x)
        x = self.instancenorm(x)
        x = self.activation(x)
        x = self.conv2(x)
        x = self.instancenorm(x)
        return original_x + x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channels': 4}]
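A quick shape check, added for illustration and assuming the ResidualBlock class above is in scope. Both convolutions use 3x3 kernels with reflection padding of 1, so spatial dimensions are preserved and the residual add is well defined:

import torch

# Illustrative shape check.
block = ResidualBlock(input_channels=4)
x = torch.rand(4, 4, 4, 4)            # (N, C, H, W)
y = block(x)
assert y.shape == x.shape             # padding keeps H and W, so x + F(x) is valid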
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp23, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = 
tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp20 = tmp2 - tmp12 tmp21 = 16.0 tmp22 = tmp18 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp19 + tmp26 tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.store(out_ptr2 + (r2 + 16 * x3), tmp27, xmask) tl.store(out_ptr3 + x3, tmp25, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf4 triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2, buf6, primals_3, buf3, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_3 buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_2[grid(576)](buf2, buf3, buf6, buf7, 576, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = buf8 del buf8 buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. 
float32) triton_per_fused__native_batch_norm_legit_add_convolution_3[grid(16)]( buf9, primals_5, primals_1, buf10, buf14, buf13, 16, 16, XBLOCK =8, num_warps=2, num_stages=1) del primals_1 del primals_5 return (buf14, primals_2, primals_4, buf0, buf2, buf3, buf6, buf7, buf9, reinterpret_tensor(buf13, (16,), (1,), 0), reinterpret_tensor(buf10, (1, 16, 1, 1), (16, 1, 1, 1), 0)) class ResidualBlockNew(nn.Module): def __init__(self, input_channels): super(ResidualBlockNew, self).__init__() self.conv1 = nn.Conv2d(input_channels, input_channels, kernel_size= 3, padding=1, padding_mode='reflect') self.conv2 = nn.Conv2d(input_channels, input_channels, kernel_size= 3, padding=1, padding_mode='reflect') self.instancenorm = nn.InstanceNorm2d(input_channels) self.activation = nn.ReLU() def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
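With a CUDA device and Triton available, the compiled block can be compared against the eager one; copying the state dict aligns the randomly initialized convolution weights. An illustrative sketch, assuming both ResidualBlock and ResidualBlockNew from this entry are in scope:

import torch

# Illustrative equivalence check; requires CUDA and Triton at runtime.
if torch.cuda.is_available():
    eager = ResidualBlock(input_channels=4).cuda()
    fused = ResidualBlockNew(input_channels=4).cuda()
    fused.load_state_dict(eager.state_dict())   # conv1/conv2 params line up by name
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)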
diegushko/CycleGAN
ResidualBlock
false
12,271
[ "MIT" ]
0
630d1cd00cef3f09f036d3c734d31c772cc0a786
https://github.com/diegushko/CycleGAN/tree/630d1cd00cef3f09f036d3c734d31c772cc0a786
SelfAttentionPooling
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/rg/crg522m3y4v7k4jllgwpydciu6bjqsfnsxrer5whyf4hotsoe5rw.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = 
xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/h7/ch7ziltjnllhlwal6dz2n67p6gl5e2gojxkzuefleah4glcy25od.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uu/cuuihy3fs6zmxgj73f52xxftscmhwmh6ubigwdpfc7qqdm3hgdhp.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) x3 = xindex % 16 x1 = (xindex // 4) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask) tmp1 = tl.load(in_ptr1 + (x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask) tmp4 = tl.load(in_ptr1 + (4 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask) tmp8 = tl.load(in_ptr1 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask) tmp12 = tl.load(in_ptr1 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x4), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) 
triton_poi_fused__softmax_0.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_2.run(primals_3, buf3, buf4, 64, grid=grid(64), stream=stream0) del buf3 return (buf4, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
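For orientation, here is what the three kernels above compute, transcribed into plain PyTorch. This is an illustrative sketch, not part of the generated file; the shapes follow benchmark_compiled_module, and buffer names refer to the dump above. Note that the softmax normalizes over dim 0 of the (4, 4, 4) logits, matching the sum over dim [0] in the graph fragment and the implicit dimension nn.functional.softmax picks in the source module below.

# Hypothetical eager transcription of the compiled graph above (toy shapes only).
import torch

w = torch.rand(1, 4, device='cuda')        # primals_1 (W.weight)
b = torch.rand(1, device='cuda')           # primals_2 (W.bias)
x = torch.rand(4, 4, 4, 4, device='cuda')  # primals_3 (input)

logits = (x.reshape(64, 4) @ w.t() + b).reshape(4, 4, 4)  # addmm -> buf1
att_w = torch.softmax(logits, dim=0)                      # _softmax_0 / _softmax_1 -> buf3
utter_rep = (x * att_w.unsqueeze(-1)).sum(dim=1)          # fused mul+sum -> buf4, shape (4, 4, 4)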
import torch
import torch.nn as nn


class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension
        attention_weight:
            att_w : size (N, T, 1)
        return:
            utter_rep: size (N, H)
        """
        softmax = nn.functional.softmax
        att_w = softmax(self.W(batch_rep).squeeze(-1)).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4}]
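A quick eager-mode sanity check of the reference module (a sketch, assuming only the class above): softmax is called without an explicit dim, so PyTorch's implicit rule applies; for the 3D tensor produced by squeeze(-1) it normalizes over dim 0, not over T, which is why the compiled graph above reduces over dim [0].

# Minimal shape check for the reference module above (illustrative sketch).
import torch

pool = SelfAttentionPooling(input_dim=4)
batch_rep = torch.rand(4, 4, 4, 4)  # same toy shape as get_inputs()
utter_rep = pool(batch_rep)         # recent PyTorch warns about the implicit softmax dim here
print(utter_rep.shape)              # torch.Size([4, 4, 4]): dim 1 is summed out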
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x1 = xindex // 4 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask) tmp1 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask) tmp4 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask) tmp8 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask) tmp12 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf4 = buf2
        del buf2
        triton_poi_fused_mul_sum_2[grid(64)](primals_3, buf3, buf4, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del buf3
    return buf4, primals_3, buf1


class SelfAttentionPoolingNew(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPoolingNew, self).__init__()
        self.W = nn.Linear(input_dim, 1)

    def forward(self, input_0):
        primals_1 = self.W.weight
        primals_2 = self.W.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
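Assuming a CUDA device (the generated kernels are CUDA-only), a minimal parity check between the eager reference and the Inductor-generated wrapper could look like the following sketch; the class names come from the code above, and both modules expose the same self.W, so a state_dict transfer lines up.

# Sketch: compare the eager and compiled modules on identical weights (CUDA required).
import torch

torch.manual_seed(0)
eager = SelfAttentionPooling(input_dim=4).cuda()
compiled = SelfAttentionPoolingNew(input_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share W's weight and bias

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), compiled(x), atol=1e-6))  # expected: True, up to fp tolerance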
czlwang/s3prl
SelfAttentionPooling
false
12272
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
AP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3v/c3vbbnaoh2ala54xhjzwr7f44xb5tmg7hvdni6ytelrhdlekfg4j.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_8, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 
+ (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/22/c22avtz547ccq6m4s32gquk4cdjjs4eaicgqenyimouechn25s2u.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = (xindex // 64) x5 = (xindex // 4) % 16 x2 = (xindex // 16) % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + 
(16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + (x7), tmp42, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [feature_BxTxH], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_5, buf8, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 
4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(primals_8, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_2.run(buf0, primals_8, buf4, buf5, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del buf6 return (buf7, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, primals_6, buf8, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
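For orientation, a plain-PyTorch transcription of what this compiled AP graph computes (an illustrative sketch using the toy shapes from benchmark_compiled_module; buffer names refer to the dump above, and the Linear layers stand in for the primals):

# Hypothetical eager transcription of the AP graph above (toy shapes only).
import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)     # primals_3 (features)
mask = torch.rand(4, 4, 4, 4)  # primals_8 (additive mask logits)
lin = torch.nn.Linear(4, 4)    # primals_1 / primals_2
W_a = torch.nn.Linear(4, 4)    # primals_4 / primals_5
W = torch.nn.Linear(4, 1)      # primals_6 / primals_7

h = lin(x)                                  # addmm -> buf0
logits = W(torch.relu(W_a(h))).squeeze(-1)  # relu + addmm -> buf2, buf4
att = F.softmax(mask + logits, dim=-1)      # buf5/buf6 hold the row max and exp-sum
out = (h * att.unsqueeze(-1)).sum(dim=1)    # fused mul+sum -> buf7, shape (4, 4, 4, 4)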
import torch
import torch.nn as nn


class AttentivePooling(nn.Module):
    """
    Implementation of Attentive Pooling
    """

    def __init__(self, input_dim, **kwargs):
        super(AttentivePooling, self).__init__()
        self.W_a = nn.Linear(input_dim, input_dim)
        self.W = nn.Linear(input_dim, 1)
        self.act_fn = nn.ReLU()
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension
        attention_weight:
            att_w : size (B, T, 1)
        return:
            utter_rep: size (B, H)
        """
        att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep, att_w


class AP(nn.Module):
    """ Attentive Pooling module incorporating an attention mask """

    def __init__(self, out_dim, input_dim):
        super(AP, self).__init__()
        self.linear = nn.Linear(input_dim, out_dim)
        self.sap_layer = AttentivePooling(out_dim)
        self.act_fn = nn.ReLU()

    def forward(self, feature_BxTxH, att_mask_BxT):
        """
        Arguments
            feature_BxTxH - [BxTxH] Acoustic feature
            att_mask_BxT - [BxT] Attention mask logits
        """
        feature_BxTxH = self.linear(feature_BxTxH)
        sap_vec, _ = self.sap_layer(feature_BxTxH, att_mask_BxT)
        return sap_vec


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'out_dim': 4, 'input_dim': 4}]
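One usage note for the reference modules above: the mask enters the logits additively (att_mask + att_logits), so padded positions are suppressed by passing a large negative value rather than a boolean mask. A small illustrative sketch with the toy shapes from get_inputs():

# Sketch: additive masking with the eager AP module above.
import torch

ap = AP(out_dim=4, input_dim=4)
feature = torch.rand(4, 4, 4, 4)
mask = torch.zeros(4, 4, 4, 4)
mask[..., -1] = float('-inf')  # drives the last position's attention weight to zero
pooled = ap(feature, mask)
print(pooled.shape)            # torch.Size([4, 4, 4, 4]) under these broadcasted toy shapes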
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = xindex // 64 x5 = xindex // 4 % 16 x2 = xindex // 16 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, 
eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + x7, tmp42, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, primals_5, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_8, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(256)](buf0, primals_8, buf4, buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 return buf7, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf4, 
primals_6, buf8, primals_4


class AttentivePooling(nn.Module):
    """
    Implementation of Attentive Pooling
    """

    def __init__(self, input_dim, **kwargs):
        super(AttentivePooling, self).__init__()
        self.W_a = nn.Linear(input_dim, input_dim)
        self.W = nn.Linear(input_dim, 1)
        self.act_fn = nn.ReLU()
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension
        attention_weight:
            att_w : size (B, T, 1)
        return:
            utter_rep: size (B, H)
        """
        att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep, att_w


class APNew(nn.Module):
    """ Attentive Pooling module incorporating an attention mask """

    def __init__(self, out_dim, input_dim):
        super(APNew, self).__init__()
        self.linear = nn.Linear(input_dim, out_dim)
        self.sap_layer = AttentivePooling(out_dim)
        self.act_fn = nn.ReLU()

    def forward(self, input_0, input_1):
        primals_1 = self.linear.weight
        primals_2 = self.linear.bias
        primals_4 = self.sap_layer.W_a.weight
        primals_5 = self.sap_layer.W_a.bias
        primals_6 = self.sap_layer.W.weight
        primals_7 = self.sap_layer.W.bias
        primals_3 = input_0
        primals_8 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
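Mirroring the earlier check, a hedged CUDA parity test between AP and APNew (a sketch; both classes are defined above and use the same parameter names, so state_dict transfer works):

# Sketch: eager vs. Inductor-compiled AP on identical weights (CUDA required).
import torch

torch.manual_seed(0)
eager = AP(out_dim=4, input_dim=4).cuda()
compiled = APNew(out_dim=4, input_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())

feature = torch.rand(4, 4, 4, 4, device='cuda')
mask = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(feature, mask), compiled(feature, mask),
                         atol=1e-5))  # expected: True, up to fp tolerance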
czlwang/s3prl
AP
false
12273
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
BertLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {}) # %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/iz/ciztqj6kop3hxov46yrmzprkzfir3eljcic4mkqznz2j5cfeaudr.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float("-inf") tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = (tmp29 != 0) tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = (tmp33 != 0) tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = (tmp38 != 0) tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = (tmp43 != 0) tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) tl.store(out_ptr2 + (x2), tmp45, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/x5/cx5uvbfethxuwwkwxf3xaualzhlcwqsz4jxqpbhintggaypzjwqf.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, 
major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1) tmp2 = tl.load(in_out_ptr0 + (x4), xmask) tmp3 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [attn_value_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # attn_value_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py # Topologically Sorted Source Nodes: [residual, ln], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # ln => var_mean # residual => add_1 # Graph fragment: # %add_1 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_1, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/l3/cl3bnd5pv2p4ydfmlj74bv4mbiwr2ntrdvbubnjubetyhosmxag6.py # Topologically Sorted Source Nodes: [residual, ln], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # ln => add_2, add_3, mul, mul_1, rsqrt, sub_1 # residual => add_1 # Graph fragment: # %add_1 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_11), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/u2/cu2fig7atthxt4e4ppqi7fkzy5tvgusm4oiytjrwoaaw43vzqnis.py # Topologically Sorted Source Nodes: [proj_12], Original ATen: [aten.gelu] # Source node to ATen node mapping: # proj_12 => add_4, erf, mul_2, mul_3, mul_4 # Graph fragment: # %mul_2 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.5), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_3,), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_4), kwargs = {}) triton_poi_fused_gelu_7 = async_compile.triton('triton_poi_fused_gelu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/nn/cnnbj7icg3u4gfdofpzdhggwowyp2etfyt3fb2uoi37ho5n4hkgk.py # Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.add] # Source node to ATen node mapping: # residual_1 => add_5 # Graph fragment: # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_21, %add_3), kwargs = {}) triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gz/cgzqe5dke5rie2wa5xpxqomksfdsrkusulax72rkvntne4p5jvyf.py # Topologically Sorted Source Nodes: [ln_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # ln_1 => add_6, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [2]), kwargs = {correction: 0, keepdim: True}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1.0), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/al/cal3txxjlyumb2wxf6pzsp7g5yvv5ygiluv6ygjjzldvb2woph4t.py # Topologically Sorted Source Nodes: [ln_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # ln_1 => add_6, add_7, mul_5, mul_6, rsqrt_1, sub_2, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [2]), kwargs = {correction: 0, keepdim: True}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1.0), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %getitem_3), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_17), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_18), kwargs = {}) triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 
256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, ), (1, )) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(buf2, primals_7, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(buf0, primals_3, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_3 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = 
reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf5, primals_8, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf9, buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(buf1, primals_5, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [attn_value_1], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [proj_9], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [residual, ln], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(buf13, primals_1, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [residual, ln], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(buf13, primals_1, buf14, buf15, primals_11, primals_12, buf16, 64, grid=grid(64), stream=stream0) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [proj_11], Original ATen: [aten.addmm] extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf17) del primals_14 buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [proj_12], Original ATen: [aten.gelu] triton_poi_fused_gelu_7.run(buf17, buf18, 64, grid=grid(64), stream=stream0) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse # Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.add] triton_poi_fused_add_8.run(buf20, primals_16, buf16, 64, grid=grid(64), stream=stream0) del primals_16 buf21 = buf15; del buf15 # reuse buf22 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [ln_1], Original ATen: 
[aten.native_layer_norm] triton_poi_fused_native_layer_norm_9.run(buf20, buf21, buf22, 16, grid=grid(16), stream=stream0) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ln_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_10.run(buf20, buf21, buf22, primals_17, primals_18, buf23, 64, grid=grid(64), stream=stream0) del buf21 del buf22 del primals_18 return (buf23, primals_1, primals_11, primals_17, buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf17, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf20, primals_15, primals_13, primals_9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
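Note on the attention softmax (a reader aid, not part of the captured row): the pair triton_poi_fused_1/triton_poi_fused_2 used in call() above implements a max-subtracted softmax that also tracks whether any score in a row is finite, so fully masked rows come out as exact zeros rather than NaN. A minimal eager sketch, with safe_softmax as a hypothetical name:

import torch

def safe_softmax(scores: torch.Tensor, dim: int = -1) -> torch.Tensor:
    # Eager sketch of triton_poi_fused_1/_2: a plain softmax yields NaN on
    # rows where every masked score is -inf; the fused pair computes an
    # "any finite entry" flag (tmp45) and writes 0.0 for such rows instead.
    all_masked = (scores == float('-inf')).all(dim=dim, keepdim=True)
    probs = torch.softmax(scores, dim=dim)
    return torch.where(all_masked, torch.zeros_like(probs), probs)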
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn import torch.nn.functional as F class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transform(self, x, linear_layer): bs, seq_len = x.shape[:2] proj = linear_layer(x) proj = proj.view(bs, seq_len, self.num_attention_heads, self. attention_head_size) proj = proj.transpose(1, 2) return proj def attention(self, key, query, value, attention_mask): attention_scores = torch.matmul(query, key.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attn_value = torch.matmul(attention_probs, value) attn_value = attn_value.transpose(1, 2).contiguous() bs, seq_len = attn_value.shape[:2] attn_value = attn_value.view(bs, seq_len, -1) return attn_value def forward(self, hidden_states, attention_mask): key_layer = self.transform(hidden_states, self.key) value_layer = self.transform(hidden_states, self.value) query_layer = self.transform(hidden_states, self.query) attn_value = self.attention(key_layer, query_layer, value_layer, attention_mask) return attn_value class BertLayer(nn.Module): def __init__(self, config): super().__init__() self.self_attention = BertSelfAttention(config) self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size ) self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps= config.layer_norm_eps) self.attention_dropout = nn.Dropout(config.hidden_dropout_prob) self.interm_dense = nn.Linear(config.hidden_size, config. intermediate_size) self.interm_af = F.gelu self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size ) self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.out_dropout = nn.Dropout(config.hidden_dropout_prob) def add_norm(self, input, output, dense_layer, dropout, ln_layer): proj = dense_layer(output) proj = dropout(proj) residual = proj + input ln = ln_layer(residual) return ln def forward(self, hidden_states, attention_mask): attn_outputs = self.self_attention(hidden_states, attention_mask) attn_outputs = self.add_norm(hidden_states, attn_outputs, self. attention_dense, self.attention_dropout, self.attention_layer_norm) proj = self.interm_dense(attn_outputs) proj = self.interm_af(proj) layer_output = self.add_norm(attn_outputs, proj, self.out_dense, self.out_dropout, self.out_layer_norm) return layer_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(num_attention_heads=4, hidden_size= 4, attention_probs_dropout_prob=0.5, layer_norm_eps=1, hidden_dropout_prob=0.5, intermediate_size=4)}]
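The residual-add + LayerNorm fusion (triton_poi_fused_add_native_layer_norm_5/6) can be checked against the eager sketch below; eps defaults to 1.0 only because get_init_inputs() above sets layer_norm_eps=1, and add_then_layer_norm is a hypothetical helper name:

import torch

def add_then_layer_norm(proj, hidden, weight, bias, eps=1.0):
    # Eager sketch of triton_poi_fused_add_native_layer_norm_5/6: residual
    # add, then LayerNorm over the last dim with biased variance
    # (correction=0 in the captured graph).
    residual = proj + hidden
    mean = residual.mean(dim=-1, keepdim=True)
    var = residual.var(dim=-1, unbiased=False, keepdim=True)
    return (residual - mean) * torch.rsqrt(var + eps) * weight + bias

torch.nn.functional.layer_norm(proj + hidden, (4,), weight, bias, eps=1.0) produces the same result.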
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float('-inf') tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = tmp29 != 0 tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = tmp33 != 0 tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38 != 0 tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) tl.store(out_ptr2 + x2, tmp45, xmask) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 
= tl.load(in_out_ptr0 + x4, xmask) tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 
+ x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 
1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf2, primals_7, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf2 triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf1, primals_5, buf10, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](buf13, primals_1, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](buf13, primals_1, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf17 = 
empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf17) del primals_14 buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_gelu_7[grid(64)](buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 triton_poi_fused_add_8[grid(64)](buf20, primals_16, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf21 = buf15 del buf15 buf22 = buf14 del buf14 triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_10[grid(64)](buf20, buf21, buf22, primals_17, primals_18, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_18 return buf23, primals_1, primals_11, primals_17, buf9, reinterpret_tensor( buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf12, (16, 4), (4, 1), 0 ), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0 ), buf17, reinterpret_tensor(buf18, (16, 4), (4, 1), 0 ), buf20, primals_15, primals_13, primals_9 class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transform(self, x, linear_layer): bs, seq_len = x.shape[:2] proj = linear_layer(x) proj = proj.view(bs, seq_len, self.num_attention_heads, self. attention_head_size) proj = proj.transpose(1, 2) return proj def attention(self, key, query, value, attention_mask): attention_scores = torch.matmul(query, key.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attn_value = torch.matmul(attention_probs, value) attn_value = attn_value.transpose(1, 2).contiguous() bs, seq_len = attn_value.shape[:2] attn_value = attn_value.view(bs, seq_len, -1) return attn_value def forward(self, hidden_states, attention_mask): key_layer = self.transform(hidden_states, self.key) value_layer = self.transform(hidden_states, self.value) query_layer = self.transform(hidden_states, self.query) attn_value = self.attention(key_layer, query_layer, value_layer, attention_mask) return attn_value class BertLayerNew(nn.Module): def __init__(self, config): super().__init__() self.self_attention = BertSelfAttention(config) self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size ) self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps= config.layer_norm_eps) self.attention_dropout = nn.Dropout(config.hidden_dropout_prob) self.interm_dense = nn.Linear(config.hidden_size, config. 
intermediate_size) self.interm_af = F.gelu self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size ) self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.out_dropout = nn.Dropout(config.hidden_dropout_prob) def add_norm(self, input, output, dense_layer, dropout, ln_layer): proj = dense_layer(output) proj = dropout(proj) residual = proj + input ln = ln_layer(residual) return ln def forward(self, input_0, input_1): primals_2 = self.self_attention.query.weight primals_3 = self.self_attention.query.bias primals_4 = self.self_attention.key.weight primals_5 = self.self_attention.key.bias primals_6 = self.self_attention.value.weight primals_7 = self.self_attention.value.bias primals_9 = self.attention_dense.weight primals_10 = self.attention_dense.bias primals_11 = self.attention_layer_norm.weight primals_12 = self.attention_layer_norm.bias primals_13 = self.interm_dense.weight primals_14 = self.interm_dense.bias primals_15 = self.out_dense.weight primals_16 = self.out_dense.bias primals_17 = self.out_layer_norm.weight primals_18 = self.out_layer_norm.bias primals_1 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
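A quick way to sanity-check the compiled module against the eager one (an assumption-laden sketch: it needs a CUDA device because call() pins buffers to cuda:0, substitutes types.SimpleNamespace for _mock_config, and assumes the BertLayer, BertLayerNew, and get_inputs definitions above are in one scope; eval() makes the dropout layers identity, matching the traced graph, which contains no dropout ops):

import torch
from types import SimpleNamespace

config = SimpleNamespace(num_attention_heads=4, hidden_size=4,
                         attention_probs_dropout_prob=0.5, layer_norm_eps=1,
                         hidden_dropout_prob=0.5, intermediate_size=4)
eager = BertLayer(config).cuda().eval()
fused = BertLayerNew(config).cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical submodule names
hidden, mask = (t.cuda() for t in get_inputs())
assert torch.allclose(eager(hidden, mask), fused(hidden, mask), atol=1e-5)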
brendon-boldt/minbert-assignment
BertLayer
false
12274
[ "Apache-2.0" ]
0
0b562d791d34a40fd3c0383a0a32b4eeb2171cb5
https://github.com/brendon-boldt/minbert-assignment/tree/0b562d791d34a40fd3c0383a0a32b4eeb2171cb5
AdMSoftmaxLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.div] # Source node to ATen node mapping: # x => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand_1), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = 
tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rd/crdggc56dw4i2v6iocdkenrwijoptevzhgfj4622ke7pcflpdwxy.py # Topologically Sorted Source Nodes: [sub, numerator], Original ATen: [aten.sub, aten.mul] # Source node to ATen node mapping: # numerator => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%diagonal, 0.4), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 30.0), kwargs = {}) triton_poi_fused_mul_sub_1 = async_compile.triton('triton_poi_fused_mul_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (x0 + (16*tmp4) + (64*x1)), xmask) tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wf], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [sub, numerator], Original ATen: [aten.sub, aten.mul] triton_poi_fused_mul_sub_1.run(primals_2, buf1, buf2, 64, grid=grid(64), stream=stream0) return (buf2, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
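For reference, triton_poi_fused_div_0 above is an L2 normalization along dim=1 with the norm clamped at 1e-12 (tmp13/tmp14 in the kernel), which is exactly F.normalize's default behavior; a small eager check:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
# The kernel sums squares at stride-16 offsets, i.e. over dim=1, takes the
# sqrt, clamps at 1e-12, and divides.
manual = x / x.norm(dim=1, keepdim=True).clamp_min(1e-12)
assert torch.allclose(manual, F.normalize(x, dim=1))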
import torch import torch.nn as nn import torch.nn.functional as F class AdMSoftmaxLoss(nn.Module): def __init__(self, in_features, out_features, s=30.0, m=0.4): """ AM Softmax Loss """ super(AdMSoftmaxLoss, self).__init__() self.s = s self.m = m self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) def forward(self, x, labels): """ input shape (N, in_features) """ assert len(x) == len(labels) assert torch.min(labels) >= 0 assert torch.max(labels) < self.out_features for W in self.fc.parameters(): W = F.normalize(W, dim=1) x = F.normalize(x, dim=1) wf = self.fc(x) numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels]) - self.m) excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0 ) for i, y in enumerate(labels)], dim=0) denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s * excl), dim=1) L = numerator - torch.log(denominator) return -torch.mean(L) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
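One caveat about the source module above: the loop `for W in self.fc.parameters(): W = F.normalize(W, dim=1)` only rebinds the local name W and never modifies the stored weight, which is consistent with the traced graph containing no weight normalization. A suggested in-place variant (a sketch of a fix, not what the captured code or kernels do):

import torch
import torch.nn.functional as F
from torch import nn

def normalize_fc_weight(fc: nn.Linear) -> None:
    # In-place rewrite of the no-op loop in forward() above: normalize the
    # rows of the stored weight without tracking gradients.
    with torch.no_grad():
        fc.weight.copy_(F.normalize(fc.weight, dim=1))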
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_mul_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (x0 + 16 * tmp4 + 64 * x1), xmask) tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (4, 1, 16), torch.float32) triton_poi_fused_mul_sub_1[grid(64)](primals_2, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf2, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class AdMSoftmaxLossNew(nn.Module): def __init__(self, in_features, out_features, s=30.0, m=0.4): """ AM Softmax Loss """ super(AdMSoftmaxLossNew, self).__init__() self.s = s self.m = m self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) def forward(self, input_0, input_1): primals_3 = self.fc.weight primals_1 = input_0 primals_2 = input_1 output = 
call([primals_1, primals_2, primals_3]) return output[0]
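The gather in triton_poi_fused_mul_sub_1 corresponds to picking each row's target-class cosine before applying s * (cos - m) with the captured constants s=30.0, m=0.4; for the plain (N, C) case, the source's diagonal/transpose formulation reduces to a simple indexed gather:

import torch

s, m = 30.0, 0.4
wf = torch.rand(4, 4)                       # cosine logits, (N, C)
labels = torch.ones(4, dtype=torch.int64)
gathered = torch.diagonal(wf.transpose(0, 1)[labels])  # as written in forward()
assert torch.equal(gathered, wf[torch.arange(4), labels])
numerator = s * (gathered - m)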
czlwang/s3prl
AdMSoftmaxLoss
false
12275
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
SoftmaxLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # loss => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/t2/ct2dbabladhyyceg2gmfqrslgo4edv7x6gs7iscumud7suileuje.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] # Source node to ATen node mapping: # loss => div, exp, log, mul, neg, sub_1, sum_1, sum_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_4), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK 
xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits_BxSpn], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf3, buf1, primals_4, 1, 256, grid=grid(1), stream=stream0) del buf1 return (buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class SoftmaxLoss(nn.Module):

    def __init__(self, hidden_dim, speaker_num, **kwargs):
        """ Softmax Loss """
        super(SoftmaxLoss, self).__init__()
        self.fc = nn.Linear(hidden_dim, speaker_num)
        self.loss = nn.CrossEntropyLoss()

    def forward(self, x_BxH, labels_B):
        """
        x shape: (B, H)
        labels shape: (B)
        """
        logits_BxSpn = self.fc(x_BxH)
        loss = self.loss(logits_BxSpn, labels_B)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_dim': 4, 'speaker_num': 4}]
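Editorial note: the two generated kernels above are the standard numerically stable log-softmax split. The pointwise kernel subtracts the per-position max over dim 1, and the persistent-reduction kernel finishes with log-sum-exp, the weighted sum against the probability targets, negation, and a multiply by 0.015625, which is exactly 1/64, the mean over the 4*4*4 = 64 positions that CrossEntropyLoss reduces. Below is a minimal eager sketch of the same arithmetic; `softmax_loss_reference` is a hypothetical name, and probability (soft) targets in `cross_entropy` assume torch >= 1.10.

import torch
import torch.nn.functional as F

def softmax_loss_reference(logits, target_probs):
    # Kernel 0: subtract the per-position max over dim 1 for stability.
    shifted = logits - logits.amax(dim=1, keepdim=True)
    # Kernel 1: log-sum-exp completes log_softmax; the weighted sum,
    # negation, and the 1/64 factor reproduce mean-reduced cross entropy
    # with probability targets.
    log_probs = shifted - shifted.exp().sum(dim=1, keepdim=True).log()
    return -(log_probs * target_probs).sum() / 64

logits = torch.randn(4, 4, 4, 4)
targets = torch.rand(4, 4, 4, 4)
assert torch.allclose(softmax_loss_reference(logits, targets),
                      F.cross_entropy(logits, targets), atol=1e-6)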
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf3, buf1, 
primals_4, 1, 256, num_warps=2, num_stages=1) del buf1 return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 class SoftmaxLossNew(nn.Module): def __init__(self, hidden_dim, speaker_num, **kwargs): """ Softmax Loss """ super(SoftmaxLossNew, self).__init__() self.fc = nn.Linear(hidden_dim, speaker_num) self.loss = nn.CrossEntropyLoss() def forward(self, input_0, input_1): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
czlwang/s3prl
SoftmaxLoss
false
12,276
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
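A hypothetical smoke test for this row (not part of the dump): it assumes the generated module above has been executed on a CUDA machine, since `call()` pins device 0, and it copies the `fc` parameters so both paths see the same weights. Note the float `labels` here act as class probabilities, not indices.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = SoftmaxLoss(hidden_dim=4, speaker_num=4).cuda()
    compiled = SoftmaxLossNew(hidden_dim=4, speaker_num=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share the fc parameters
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')  # soft targets, not indices
    assert torch.allclose(eager(x, y), compiled(x, y), atol=1e-5)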
CMVN
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/26/c26p57rplx7r3zwtutlkvbcvhikrbu7vi4bucfr3i7hdqgaql6n2.py # Topologically Sorted Source Nodes: [mean, sub, std, add, truediv], Original ATen: [aten.mean, aten.sub, aten.std, aten.add, aten.div] # Source node to ATen node mapping: # add => add # mean => mean # std => sqrt, var # sub => sub # truediv => div # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg0_1, [2]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-10), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) triton_poi_fused_add_div_mean_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-10 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tl.store(out_ptr0 + (x3), tmp27, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, sub, std, add, truediv], Original ATen: [aten.mean, aten.sub, aten.std, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class CMVN(nn.Module):
    __constants__ = ['mode', 'dim', 'eps']

    def __init__(self, mode='global', dim=2, eps=1e-10):
        super(CMVN, self).__init__()
        if mode != 'global':
            raise NotImplementedError(
                'Only support global mean variance normalization.')
        self.mode = mode
        self.dim = dim
        self.eps = eps

    def forward(self, x):
        if self.mode == 'global':
            return (x - x.mean(self.dim, keepdim=True)) / (self.eps +
                x.std(self.dim, keepdim=True))

    def extra_repr(self):
        return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
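The single fused kernel above folds the whole normalization into one pass: mean over the four entries of dim 2, unbiased variance (the 3.0 divisor matches `torch.std`'s default correction=1), sqrt, the 1e-10 guard, and the division. A small illustrative check of the same arithmetic, assuming the `CMVN` class above is in scope:

import torch

x = torch.rand(4, 4, 4, 4)
mean = x.mean(dim=2, keepdim=True)
# Unbiased variance: squared deviations of the 4 entries divided by 3,
# the tmp22 = 3.0 divisor in the fused kernel.
std = x.var(dim=2, keepdim=True, unbiased=True).sqrt()
assert torch.allclose((x - mean) / (1e-10 + std), CMVN()(x), atol=1e-6)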
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-10 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tl.store(out_ptr0 + x3, tmp27, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class CMVNNew(nn.Module): __constants__ = ['mode', 'dim', 'eps'] def __init__(self, mode='global', dim=2, eps=1e-10): super(CMVNNew, self).__init__() if mode != 'global': raise NotImplementedError( 'Only support global mean variance normalization.') self.mode = mode self.dim = dim self.eps = eps def extra_repr(self): return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
czlwang/s3prl
CMVN
false
12,277
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
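A hypothetical parity check between the eager and compiled modules (requires CUDA and the generated code above in scope); CMVN is stateless, so no parameter copying is needed:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(CMVN()(x), CMVNNew()(x), atol=1e-6)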
ASP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3v/c3vbbnaoh2ala54xhjzwr7f44xb5tmg7hvdni6ytelrhdlekfg4j.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_8, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 
+ (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ec/cecsiupxfmmb3m5bbivf5bciiq2k5ishu3qab2b2u6oieray7t5b.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %view_1), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x5 = (xindex // 4) % 64 x7 = (xindex // 16) x8 = xindex % 256 x9 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x7), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x7), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + (x8), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + (x9), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pk/cpky4ikacz2hgolxoehy6foq57gecnbejjobg6du4jubou3tbafn.py # Topologically Sorted Source Nodes: [mul, utter_rep, mul_2, sum_2, pow_1, sub, add_1, variance], Original 
ATen: [aten.mul, aten.sum, aten.pow, aten.sub, aten.add, aten.sqrt] # Source node to ATen node mapping: # add_1 => add_1 # mul => mul # mul_2 => mul_2 # pow_1 => pow_1 # sub => sub_1 # sum_2 => sum_3 # utter_rep => sum_2 # variance => sqrt # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %view_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, %pow_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-08), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%sqrt, 2), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_2, 2.0), kwargs = {}) triton_poi_fused_add_mul_pow_sqrt_sub_sum_3 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 24, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_sqrt_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x6 = xindex % 64 x3 = (xindex // 64) x4 = (xindex // 4) % 16 x2 = (xindex // 16) % 4 x0 = xindex % 4 x5 = (xindex // 4) 
x8 = xindex tmp0 = tl.load(in_ptr0 + (x6), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (64 + x6), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (16 + x4), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (128 + x6), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (32 + x4), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (192 + x6), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (48 + x4), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp43 = tl.load(in_ptr5 + (x6 + (256*x3)), xmask) tmp45 = tl.load(in_ptr5 + (64 + x6 + (256*x3)), xmask) tmp48 = tl.load(in_ptr5 + (128 + x6 + (256*x3)), xmask) tmp51 = tl.load(in_ptr5 + (192 + x6 + (256*x3)), xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tmp44 = tmp43 * tmp0 tmp46 = tmp45 * tmp10 tmp47 = tmp44 + tmp46 tmp49 = tmp48 * tmp21 tmp50 = tmp47 + tmp49 tmp52 = tmp51 * tmp32 tmp53 = tmp50 + tmp52 tmp54 = tmp42 * tmp42 tmp55 = tmp53 - tmp54 tmp56 = 1e-08 tmp57 = tmp55 + tmp56 tmp58 = libdevice.sqrt(tmp57) tmp59 = 2.0 tmp60 = tmp58 * tmp59 tmp61 = tmp42 * tmp59 tl.store(out_ptr0 + (x0 + (8*x5)), tmp42, xmask) tl.store(out_ptr2 + (x0 + (8*x5)), tmp58, xmask) tl.store(out_ptr3 + (x8), tmp60, xmask) tl.store(out_ptr4 + (x8), tmp61, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[feature_BxTxH], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_5, buf14, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(primals_8, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_8, buf4, buf5, buf6, buf0, buf8, 1024, grid=grid(1024), stream=stream0) buf11 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) buf7 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 0) # alias buf10 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 4) # alias buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep, mul_2, sum_2, pow_1, sub, add_1, variance], Original ATen: [aten.mul, aten.sum, aten.pow, aten.sub, aten.add, aten.sqrt] triton_poi_fused_add_mul_pow_sqrt_sub_sum_3.run(buf0, primals_8, buf4, buf5, buf6, buf8, buf7, buf10, buf12, buf13, 256, grid=grid(256), stream=stream0) del buf5 del buf6 return (buf11, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, buf8, buf12, buf13, primals_6, buf14, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) 
return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class AttentivePooling(nn.Module):
    """ Implementation of Attentive Pooling """

    def __init__(self, input_dim, **kwargs):
        super(AttentivePooling, self).__init__()
        self.W_a = nn.Linear(input_dim, input_dim)
        self.W = nn.Linear(input_dim, 1)
        self.act_fn = nn.ReLU()
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (B, T, H), B: batch size, T: sequence length, H: hidden dimension
            att_mask  : size (B, T), additive attention-mask logits
        return:
            utter_rep : size (B, H)
            att_w     : size (B, T, 1), attention weights
        """
        att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep, att_w


class ASP(nn.Module):
    """ Attentive Statistic Pooling module incorporating an attention mask """

    def __init__(self, out_dim, input_dim):
        super(ASP, self).__init__()
        self.linear = nn.Linear(input_dim, out_dim)
        self.ap_layer = AttentivePooling(out_dim)

    def forward(self, feature_BxTxH, att_mask_BxT):
        """
        Arguments:
            feature_BxTxH - [B, T, H] acoustic features
            att_mask_BxT  - [B, T] attention-mask logits
        """
        feature_BxTxH = self.linear(feature_BxTxH)
        sap_vec, att_w = self.ap_layer(feature_BxTxH, att_mask_BxT)
        variance = torch.sqrt(torch.sum(att_w * feature_BxTxH *
            feature_BxTxH, dim=1) - sap_vec ** 2 + 1e-08)
        statistic_pooling = torch.cat([sap_vec, variance], dim=-1)
        return statistic_pooling


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'out_dim': 4, 'input_dim': 4}]
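In eager terms, the four fused kernels compute attentive statistic pooling: attention weights from a masked logit, then a weighted mean and a weighted standard deviation that are concatenated. A hedged restatement follows; `asp_reference` is a hypothetical helper, and it deliberately mirrors the module's literal behavior, where softmax runs over the last dim but the sums run over dim 1, two axes that only coincide for the 3-D (B, T, H) inputs the docstrings describe.

import torch
import torch.nn as nn

def asp_reference(h, mask_logits, linear, W_a, W, eps=1e-08):
    h = linear(h)                                             # project to out_dim
    logits = W(torch.relu(W_a(h))).squeeze(-1) + mask_logits  # additive mask bias
    w = torch.softmax(logits, dim=-1).unsqueeze(-1)           # attention weights
    mu = (h * w).sum(dim=1)                                   # weighted mean
    sigma = torch.sqrt((w * h * h).sum(dim=1) - mu ** 2 + eps)  # weighted std
    return torch.cat([mu, sigma], dim=-1)                     # (..., 2 * out_dim)

out = asp_reference(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4),
                    nn.Linear(4, 4), nn.Linear(4, 4), nn.Linear(4, 1))
assert out.shape == (4, 4, 4, 8)  # matches buf11 in the generated call()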
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex // 4 % 64 x7 = xindex // 16 x8 = xindex % 256 x9 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x7, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x7, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + x8, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x9, tmp9, xmask) @triton.jit def triton_poi_fused_add_mul_pow_sqrt_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 
256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x6 = xindex % 64 x3 = xindex // 64 x4 = xindex // 4 % 16 x2 = xindex // 16 % 4 x0 = xindex % 4 x5 = xindex // 4 x8 = xindex tmp0 = tl.load(in_ptr0 + x6, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x6), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x4), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x6), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x4), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x6), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x4), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp43 = tl.load(in_ptr5 + (x6 + 256 * x3), xmask) tmp45 = tl.load(in_ptr5 + (64 + x6 + 256 * x3), xmask) tmp48 = tl.load(in_ptr5 + (128 + x6 + 256 * x3), xmask) tmp51 = tl.load(in_ptr5 + (192 + x6 + 256 * x3), xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tmp44 = tmp43 * tmp0 tmp46 = tmp45 * tmp10 tmp47 = tmp44 + tmp46 tmp49 = tmp48 * tmp21 tmp50 = tmp47 + tmp49 tmp52 = tmp51 * tmp32 tmp53 = tmp50 + tmp52 tmp54 = tmp42 * tmp42 tmp55 = tmp53 - tmp54 tmp56 = 1e-08 tmp57 = tmp55 + tmp56 tmp58 = libdevice.sqrt(tmp57) tmp59 = 2.0 tmp60 = tmp58 * tmp59 tmp61 = tmp42 * tmp59 tl.store(out_ptr0 + (x0 + 8 * x5), tmp42, xmask) tl.store(out_ptr2 + (x0 + 8 * x5), tmp58, xmask) tl.store(out_ptr3 + x8, tmp60, xmask) tl.store(out_ptr4 + x8, tmp61, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, primals_5, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_8, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(1024)](primals_8, buf4, buf5, buf6, buf0, buf8, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32 ) buf7 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 0) buf10 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 4) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_pow_sqrt_sub_sum_3[grid(256)](buf0, primals_8, buf4, buf5, buf6, buf8, buf7, buf10, buf12, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 return buf11, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf4, buf8, buf12, buf13, primals_6, buf14, primals_4 class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class ASPNew(nn.Module): """ Attentive Statistic Pooling module incoporate attention mask""" def __init__(self, out_dim, input_dim): super(ASPNew, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.ap_layer = AttentivePooling(out_dim) def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.ap_layer.W_a.weight primals_5 = self.ap_layer.W_a.bias primals_6 = self.ap_layer.W.weight primals_7 = self.ap_layer.W.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
czlwang/s3prl
ASP
false
12,278
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
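A hypothetical smoke test for this row (requires CUDA and the generated code above in scope); `ASPNew` returns only the pooled statistics, `output[0]` of `call()`:

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = ASP(out_dim=4, input_dim=4).cuda()
    compiled = ASPNew(out_dim=4, input_dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # same linear / W_a / W weights
    x = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x, mask), compiled(x, mask), atol=1e-4)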
ChannelNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/pd/cpdkd7oy3g2qlxint6tsd7foiux4zykryk7wd4pgiuuqhuclbvui.py # Topologically Sorted Source Nodes: [cumMean, cumVar, sub, add, rsqrt, x, mul_1, x_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.rsqrt, aten.mul] # Source node to ATen node mapping: # add => add # cumMean => mean # cumVar => var # mul_1 => mul_1 # rsqrt => rsqrt # sub => sub # x => mul # x_1 => add_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1]), kwargs = {correction: 1, keepdim: True}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_add_mean_mul_rsqrt_sub_var_0 = async_compile.triton('triton_poi_fused_add_mean_mul_rsqrt_sub_var_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_rsqrt_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_mul_rsqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x3 = (xindex // 64) x5 = xindex % 16 x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp10 * tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x4), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cumMean, cumVar, sub, add, rsqrt, x, mul_1, x_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.rsqrt, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_mul_rsqrt_sub_var_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
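A quick sanity check on the constants baked into the fused kernel above: the reduction averages the four channel values (the divide by 4.0) and then forms an unbiased variance (the divide by 3.0, i.e. N - 1), matching the eager x.mean(dim=1) / x.var(dim=1) pair that Inductor fused. A minimal eager sketch of the same arithmetic, independent of the dataset row:

import torch

x = torch.rand(4, 4, 4, 4)
mean = x.mean(dim=1, keepdim=True)                      # tmp9: channel sum / 4.0
var = ((x - mean) ** 2).sum(dim=1, keepdim=True) / 3.0  # tmp23: squared deviations / (N - 1)
assert torch.allclose(var, x.var(dim=1, keepdim=True))  # torch.var defaults to correction=1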
import torch import torch.nn as nn class ChannelNorm(nn.Module): def __init__(self, numFeatures, epsilon=1e-05, affine=True): super(ChannelNorm, self).__init__() if affine: self.weight = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) else: self.weight = None self.bias = None self.epsilon = epsilon self.p = 0 self.affine = affine self.reset_parameters() def reset_parameters(self): if self.affine: torch.nn.init.ones_(self.weight) torch.nn.init.zeros_(self.bias) def forward(self, x): cumMean = x.mean(dim=1, keepdim=True) cumVar = x.var(dim=1, keepdim=True) x = (x - cumMean) * torch.rsqrt(cumVar + self.epsilon) if self.weight is not None: x = x * self.weight + self.bias return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'numFeatures': 4}]
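The two helpers at the end are this dataset's convention for exercising a module. A minimal usage sketch, assuming the definitions above are in scope:

args, kwargs = get_init_inputs()
m = ChannelNorm(*args, **kwargs)  # numFeatures=4, affine defaults to True
x, = get_inputs()                 # shape (4, 4, 4, 4)
y = m(x)
assert y.shape == x.shape
# After reset_parameters() (weight=1, bias=0) the output is standardized
# along dim=1: per-position channel mean ~0 and unbiased variance ~1.
print(y.mean(dim=1).abs().max().item(), y.var(dim=1).mean().item())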
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mean_mul_rsqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x3 = xindex // 64 x5 = xindex % 16 x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp10 * tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_mul_rsqrt_sub_var_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class ChannelNormNew(nn.Module): def __init__(self, numFeatures, epsilon=1e-05, affine=True): super(ChannelNormNew, self).__init__() if affine: self.weight = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) else: self.weight = None self.bias = None self.epsilon = epsilon self.p = 0 self.affine = affine self.reset_parameters() def reset_parameters(self): if self.affine: torch.nn.init.ones_(self.weight) torch.nn.init.zeros_(self.bias) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
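A minimal parity check between the eager ChannelNorm and the compiled ChannelNormNew above (CUDA required, since call() pins device 0; assumes both snippets in this row are in scope). Both constructors initialize weight to ones and bias to zeros, so no state copying is needed:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = ChannelNorm(4).cuda()
    fused = ChannelNormNew(4).cuda()
    torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-5)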
czlwang/s3prl
ChannelNorm
false
12,279
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3r/c3rfy3ljjc2bfodnr5gm65jr7ew6v6kno6w6jzahlupuqxbpvfkw.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] # Source node to ATen node mapping: # x => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/aw/cawvwx3nv7ipnpnf2hcgwz5usu7vsw5yynj5ofrunhktjwqff5vq.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] # Source node to ATen node mapping: # x 
=> clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/p5/cp5wuljbdcz2dl2xvl4imkn5wmtmrnbb7mnld5glztiqavldlheh.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vv/cvvhis67uzj3m3ebbd4sgghaemqhihabasphltk5wytqdd6fe74t.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & 
ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/lw/clwfsjrjxeb2gmxy5p3lplvcrvrn37iuw4atjria32bxp2jajrtc.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_3 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5y/c5yhyv7emyc7i2ozpvns6tsiqcvdzktqqpohy4sedfe7aihkojch.py # Topologically Sorted Source Nodes: [x_3, x_5, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => var_mean_1 # x_3 => add_2 # x_5 => add_3 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xj/cxjpr2ute76xkk7edg7qlvolks2ggx2xwbrttteralhmvd2xsktw.py # Topologically Sorted Source Nodes: [x_3, x_5, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2 # x_3 => add_2 # x_5 => add_3 # Graph fragment: # %add_2 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_7), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_8), kwargs = {}) triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/f2/cf27w54efn636nm2ik2o3453oyk5bxdkax3pylw374oelyemoky5.py # Topologically Sorted Source Nodes: [mul_1, truediv, erf, add_1, x_7], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] # Source node to ATen node mapping: # add_1 => add_6 # erf => erf # mul_1 => mul_5 # truediv => div_1 # x_7 => mul_6 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.5), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_13, 1.4142135623730951), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div_1,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_6), kwargs = {}) triton_poi_fused_add_div_erf_mul_10 = async_compile.triton('triton_poi_fused_add_div_erf_mul_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_mul_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pu/cpuql3oz4hmaygynopg7lq7xhfiv7hr7pr4vyzhfpmw34jymdp7q.py # Topologically Sorted Source Nodes: [x_3, x_5, x_11], Original ATen: [aten.add] # Source node to ATen node mapping: # x_11 => add_7 # x_3 => add_2 # x_5 => add_3 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_15), kwargs = {}) triton_poi_fused_add_11 
= async_compile.triton('triton_poi_fused_add_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16, ), (1, )) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, 
primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf3, buf9, 16, 4, grid=grid(16, 4), stream=stream0) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1; del buf1 # reuse buf14 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_3, x_5, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf12, primals_6, buf13, buf14, 16, grid=grid(16), stream=stream0) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3, x_5, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_9.run(primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, grid=grid(64), stream=stream0) del buf13 del buf14 del primals_8 buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), 
torch.float32) # Topologically Sorted Source Nodes: [mul_1, truediv, erf, add_1, x_7], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] triton_poi_fused_add_div_erf_mul_10.run(buf16, buf17, 256, grid=grid(256), stream=stream0) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [x_3, x_5, x_11], Original ATen: [aten.add] triton_poi_fused_add_11.run(buf19, primals_3, buf12, primals_6, primals_12, 64, grid=grid(64), stream=stream0) del primals_12 return (buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
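The attention softmax in the code above is split across two pointwise kernels: triton_poi_fused__softmax_4 shifts each row by its max and exponentiates (the multiplications by 1.0 are the qk scale, which is head_dim ** -0.5 = 1.0 here since dim=4 and num_heads=4), and triton_poi_fused__softmax_5 divides by the row sum. A minimal eager sketch of the same two-pass, numerically stable softmax:

import torch

attn = torch.rand(4, 4, 4, 4)
e = (attn - attn.amax(dim=-1, keepdim=True)).exp()  # kernel _softmax_4: max-shift + exp
out = e / e.sum(dim=-1, keepdim=True)               # kernel _softmax_5: normalize
assert torch.allclose(out, attn.softmax(dim=-1))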
import math import torch import torch.utils.data import torch import torch.nn as nn def gelu(x): """ Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class matmul(nn.Module): def __init__(self): super().__init__() def forward(self, x1, x2): x = x1 @ x2 return x class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=gelu, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.mat = matmul() def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = self.mat(q, k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = self.mat(attn, v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer= gelu, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_add_div_erf_mul_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 
tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16,), (1,)) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, 
num_warps=1, num_stages=1)
        buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
        del buf10
        extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
        buf13 = buf1
        del buf1
        buf14 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3,
            buf12, primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3,
            buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf13
        del buf14
        del primals_8
        buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0)
        del buf7
        extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0),
            alpha=1, beta=1, out=buf16)
        del primals_10
        buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_add_div_erf_mul_10[grid(256)](buf16, buf17, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
        buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
        del buf18
        triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12,
            primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_12
    return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2,
        (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
        ), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
        ), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0
        ), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 
        1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4


def gelu(x):
    """ Original Implementation of the gelu activation function in Google
    Bert repo when initially created. For information: OpenAI GPT's gelu is
    slightly different (and gives slightly different results): 0.5 * x * (1
    + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet,
    etc. networks, however, the original name is misleading as 'Drop Connect'
    is a different form of dropout in a separate paper... See discussion:
    https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ...
    I've opted for changing the layer and argument names to 'drop path'
    rather than mix DropConnect as a layer name and use 'survival rate' as
    the argument.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.
device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class matmul(nn.Module): def __init__(self): super().__init__() def forward(self, x1, x2): x = x1 @ x2 return x class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=gelu, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.mat = matmul() def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = self.mat(q, k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = self.mat(attn, v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class BlockNew(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer= gelu, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, input_0): primals_1 = self.norm1.weight primals_2 = self.norm1.bias primals_4 = self.attn.qkv.weight primals_5 = self.attn.proj.weight primals_6 = self.attn.proj.bias primals_7 = self.norm2.weight primals_8 = self.norm2.bias primals_9 = self.mlp.fc1.weight primals_10 = self.mlp.fc1.bias primals_11 = self.mlp.fc2.weight primals_12 = self.mlp.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
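A hedged parity check between the eager Block and the Inductor-lowered BlockNew, assuming both classes above are in scope. The two modules declare identical submodule names, so a state_dict transfer makes their outputs directly comparable; call() allocates every buffer on cuda:0, so a CUDA device is required.

import torch

torch.manual_seed(0)
ref = Block(dim=4, num_heads=4).cuda().eval()
fused = BlockNew(dim=4, num_heads=4).cuda().eval()
fused.load_state_dict(ref.state_dict())   # identical parameter names
x = torch.rand([4, 4, 4], device='cuda')
with torch.no_grad():
    torch.testing.assert_close(ref(x), fused(x), rtol=1e-4, atol=1e-4)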
denisleonov/pytorch-CycleGAN-and-pix2pix
Block
false
12,280
[ "BSD-3-Clause" ]
0
d1a5f0c5911f70ed896f826619b4067ce737a83d
https://github.com/denisleonov/pytorch-CycleGAN-and-pix2pix/tree/d1a5f0c5911f70ed896f826619b4067ce737a83d
AttentivePooling
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uu/cuuixbc7clu3x4xnld3clwlzm4bwe3sea4shtlguimyhwbrzdnjg.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_6, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = 
tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp8 = tmp7 + tmp3 tmp9 = tmp6 + tmp8 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 + tmp3 tmp14 = tmp11 + tmp13 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 + tmp3 tmp19 = tmp16 + tmp18 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + (x2), tmp20, xmask) tl.store(out_ptr1 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/26/c26r27cnuaxawyutnmstc5xa4iiskv2zoymvaaom3awrmuaav47p.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => div, exp, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_6, %squeeze), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_add_2 = async_compile.triton('triton_poi_fused__softmax_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = 
tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last') tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/2q/c2qalxfq2yxll6mbnxojhbih4eltcgaqrxucbiibajluczsoe5lz.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x1 = (xindex // 4) % 16 x2 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x4), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(primals_6, buf2, primals_5, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_2.run(primals_6, buf2, primals_5, buf3, buf4, buf5, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del buf4 del primals_5 del primals_6 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_3.run(primals_3, buf5, buf6, 256, grid=grid(256), stream=stream0) return (buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0), primals_3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf5, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
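The two softmax kernels above follow the usual numerically stable split: triton_poi_fused__softmax_add_1 computes each row's maximum and the sum of shifted exponentials, and triton_poi_fused__softmax_add_2 re-reads the (mask + logits) values and normalizes. A plain-PyTorch sketch of the same decomposition, illustrative only:

import torch

logits = torch.rand(64, 4)
row_max = logits.max(dim=-1, keepdim=True).values          # kernel 1: tmp20
row_sum = (logits - row_max).exp().sum(-1, keepdim=True)   # kernel 1: tmp31
probs = (logits - row_max).exp() / row_sum                 # kernel 2
torch.testing.assert_close(probs, logits.softmax(dim=-1))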
import torch
import torch.nn as nn


class AttentivePooling(nn.Module):
    """ Implementation of Attentive Pooling """

    def __init__(self, input_dim, **kwargs):
        super(AttentivePooling, self).__init__()
        self.W_a = nn.Linear(input_dim, input_dim)
        self.W = nn.Linear(input_dim, 1)
        self.act_fn = nn.ReLU()
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (B, T, H), B: batch size, T: sequence length,
                H: hidden dimension
            att_mask : additive mask applied to the attention logits
        return:
            utter_rep : size (B, H), attention-weighted sum over T
            att_w : size (B, T, 1), attention weights
        """
        att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep, att_w


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4}]
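A minimal usage sketch for the eager AttentivePooling above, using an all-zero additive mask (no positions suppressed) and the 4-D shapes from get_inputs(); it checks that the attention weights are softmax-normalized over the last axis.

import torch

pool = AttentivePooling(input_dim=4).eval()
batch_rep = torch.rand([4, 4, 4, 4])
att_mask = torch.zeros([4, 4, 4, 4])      # zeros leave the logits unchanged
utter_rep, att_w = pool(batch_rep, att_mask)
assert utter_rep.shape == (4, 4, 4, 4) and att_w.shape == (4, 4, 4, 4, 1)
assert torch.allclose(att_w.squeeze(-1).sum(dim=-1), torch.ones(4, 4, 4), atol=1e-5)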
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp8 = tmp7 + tmp3 tmp9 = tmp6 + tmp8 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 + tmp3 tmp14 = tmp11 + tmp13 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 + tmp3 tmp19 = tmp16 + tmp18 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last') tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x1 = xindex // 4 % 16 x2 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_6, buf2, primals_5, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_add_2[grid(256)](primals_6, buf2, primals_5, buf3, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del buf3 del buf4 del primals_5 del primals_6 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_3[grid(256)](primals_3, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0 ), primals_3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), buf5, primals_4, buf7 class AttentivePoolingNew(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePoolingNew, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, input_0, input_1): primals_1 = self.W_a.weight primals_2 = self.W_a.bias primals_4 = self.W.weight primals_5 = self.W.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, 
primals_6]) return output[0], output[1]
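As with the Block entry, the compiled wrapper can be checked against the eager module; this sketch assumes a CUDA device, since call() allocates on cuda:0, and compares both returned tensors.

import torch

torch.manual_seed(0)
ref = AttentivePooling(input_dim=4).cuda().eval()
fused = AttentivePoolingNew(input_dim=4).cuda().eval()
fused.load_state_dict(ref.state_dict())
batch_rep = torch.rand([4, 4, 4, 4], device='cuda')
att_mask = torch.rand([4, 4, 4, 4], device='cuda')
for a, b in zip(ref(batch_rep, att_mask), fused(batch_rep, att_mask)):
    torch.testing.assert_close(a, b, rtol=1e-4, atol=1e-4)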
czlwang/s3prl
AttentivePooling
false
12,281
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
Delta
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ac/cactp4raijeuupgkf72atpb5dqx2m7nq5k7fpzjavsrr2lquv6jf.py # Topologically Sorted Source Nodes: [specgram_1], Original ATen: [aten.replication_pad1d] # Source node to ATen node mapping: # specgram_1 => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view, [None, None, %clamp_max]), kwargs = {}) triton_poi_fused_replication_pad1d_0 = async_compile.triton('triton_poi_fused_replication_pad1d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad1d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*(x1 % 4)) + (16*(x1 // 16)) + (64*((x1 // 4) % 4)) + ((3) * 
((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ns/cnsqxcdmqckv2b4hbcx2tgslbei3i2eta37z6mobhzwxtt56do2q.py # Topologically Sorted Source Nodes: [arange, kernel], Original ATen: [aten.arange, aten.repeat] # Source node to ATen node mapping: # arange => add, convert_element_type, iota_1, mul # kernel => repeat # Graph fragment: # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (5,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_1, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, -2), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%convert_element_type, [64, 1, 1]), kwargs = {}) triton_poi_fused_arange_repeat_1 = async_compile.triton('triton_poi_fused_arange_repeat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x2 = xindex tmp0 = (-2) + x0 tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + (x2), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7o/c7o6lz3z3wc2hctevxaw5a7vifalwi3bccdwtwzi62tq4djyl2kg.py # Topologically Sorted Source Nodes: [specgram_3], Original ATen: [aten.replication_pad1d] # Source node to ATen node mapping: # specgram_3 => _unsafe_index_1 # Graph fragment: # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_2, [None, None, 
%clamp_max_1]), kwargs = {}) triton_poi_fused_replication_pad1d_2 = async_compile.triton('triton_poi_fused_replication_pad1d_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad1d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*x1) + ((3) * ((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/i7/ci7zzwpyjyzmhwbdzexdktbiadv5a4qpgn2zumseiqxejm7orwxo.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %permute_1, %permute_3], -1), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x4 = (xindex // 12) x1 = (xindex // 12) % 4 x2 = (xindex // 48) % 4 x3 = (xindex // 192) x5 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + (16*x3) + (64*x2) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = 0.1 tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tmp16 = tl.full([1], 12, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tl.load(in_ptr2 + ((4*x1) + (16*x3) + (64*x2) + ((-8) + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp18 * tmp11 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp14, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + (x5), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [specgram_1], Original ATen: [aten.replication_pad1d] stream0 = get_raw_stream(0) triton_poi_fused_replication_pad1d_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0) buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [arange, kernel], Original ATen: [aten.arange, aten.repeat] triton_poi_fused_arange_repeat_1.run(buf1, 320, grid=grid(320), stream=stream0) # Topologically Sorted Source Nodes: [specgram_1, arange, kernel, conv1d], Original ATen: [aten.replication_pad1d, aten.arange, aten.repeat, aten.convolution] buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf2, (1, 64, 4), (256, 4, 1)) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [specgram_3], Original ATen: [aten.replication_pad1d] triton_poi_fused_replication_pad1d_2.run(buf2, buf3, 512, grid=grid(512), stream=stream0) buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [arange_1, kernel_1], Original ATen: [aten.arange, aten.repeat] triton_poi_fused_arange_repeat_1.run(buf4, 320, grid=grid(320), stream=stream0) # Topologically Sorted Source Nodes: [specgram_3, arange_1, kernel_1, conv1d_1], Original ATen: [aten.replication_pad1d, aten.arange, aten.repeat, aten.convolution] buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), 
padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf5, (1, 64, 4), (256, 4, 1)) del buf3 del buf4 buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(arg0_1, buf2, buf5, buf6, 768, grid=grid(768), stream=stream0) del arg0_1 del buf2 del buf5 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
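One detail worth pinning down in the kernels above is the 0.1 constant. With torchaudio's default win_length=5 (so N=2), ComputeDeltas evaluates the standard delta regression d_t = sum_{n=1..N} n * (c[t+n] - c[t-n]) / (2 * sum_{n=1..N} n^2): the numerator is the depthwise conv1d with the [-2, -1, 0, 1, 2] kernel built by triton_poi_fused_arange_repeat_1, and the denominator is 2 * (1^2 + 2^2) = 10, i.e. the 0.1 scale fused into triton_poi_fused_replication_pad1d_2 and triton_poi_fused_cat_3. A one-line sanity check, assuming those defaults:

win_length = 5
n_max = (win_length - 1) // 2
denom = 2 * sum(n * n for n in range(1, n_max + 1))
assert 1.0 / denom == 0.1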
import torch import torch.nn as nn from torchaudio import transforms class Delta(nn.Module): def __init__(self, order=2, **kwargs): super(Delta, self).__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, x): feats = [x] for o in range(self.order): feat = feats[-1].transpose(0, 1).unsqueeze(0) delta = self.compute_delta(feat) feats.append(delta.squeeze(0).transpose(0, 1)) x = torch.cat(feats, dim=-1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
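A minimal usage sketch for the eager module above, assuming torchaudio is installed and Delta is in scope: with the default order=2, the layer concatenates the input with its first- and second-order deltas along the last dimension, so the feature size triples.

import torch

delta = Delta(order=2)
x = torch.rand(4, 4, 4, 4)
y = delta(x)
# 4 original features + 4 deltas + 4 delta-deltas = 12
assert y.shape == (4, 4, 4, 12)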
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torchaudio import transforms assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * (x1 // 16) + 64 * (x1 // 4 % 4) + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x2 = xindex tmp0 = -2 + x0 tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x2, tmp1, xmask) @triton.jit def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x4 = xindex // 12 x1 = xindex // 12 % 4 x2 = xindex // 48 % 4 x3 = xindex // 192 x5 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + 16 * x3 + 64 * x2 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = 0.1 tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp18 = tl.load(in_ptr2 + (4 * x1 + 16 * x3 + 64 * x2 + (-8 + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp18 * tmp11 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp14, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + x5, tmp23, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad1d_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32) 
triton_poi_fused_arange_repeat_1[grid(320)](buf1, 320, XBLOCK=256, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding= (0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf2, (1, 64, 4), (256, 4, 1)) buf3 = buf0 del buf0 triton_poi_fused_replication_pad1d_2[grid(512)](buf2, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1) buf4 = buf1 del buf1 triton_poi_fused_arange_repeat_1[grid(320)](buf4, 320, XBLOCK=256, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), padding= (0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf5, (1, 64, 4), (256, 4, 1)) del buf3 del buf4 buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch. float32) triton_poi_fused_cat_3[grid(768)](arg0_1, buf2, buf5, buf6, 768, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del buf2 del buf5 return buf6, class DeltaNew(nn.Module): def __init__(self, order=2, **kwargs): super(DeltaNew, self).__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
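A hedged parity sketch, assuming a CUDA device and that both Delta and DeltaNew above are in scope; the generated call() asserts the exact (4, 4, 4, 4) input shape, so only that shape is exercised here.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = Delta(order=2)(x)      # eager torchaudio ComputeDeltas path
out = DeltaNew(order=2)(x)   # Inductor-generated kernels
assert torch.allclose(ref, out, atol=1e-5)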
czlwang/s3prl
Delta
false
12282
[ "Apache-2.0" ]
0
81d4bb8d051cee20fa87c083b8478999e1766172
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
ExpandingBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/6b/c6bb6lg6d5m65g4qbcp3ny32dxitmmbwz2omvwa4xf6ehsvoshnx.py # Topologically Sorted Source Nodes: [x, x_1, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => convolution # x_1 => add, rsqrt, var_mean # x_2 => relu # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], True, [1, 1], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[8, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 8 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + (r2 + (64*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 64.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp26 = tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = 0.0 tmp29 = tmp27 <= tmp28 tl.store(in_out_ptr0 + (r2 + (64*x3)), tmp2, xmask) tl.store(out_ptr2 + (r2 + (64*x3)), tmp27, xmask) tl.store(out_ptr3 + (r2 + (64*x3)), tmp29, xmask) tl.store(out_ptr4 + (x3), tmp24, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 8, 8), (128, 64, 8, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf6 = empty_strided_cuda((4, 2, 8, 8), (128, 64, 8, 1), torch.float32) buf7 = empty_strided_cuda((4, 2, 8, 8), (128, 64, 8, 1), torch.bool) buf5 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) # Topologically Sorted Source Nodes: [x, x_1, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, buf6, buf7, buf5, 8, 64, grid=grid(8), stream=stream0) del primals_2 return (buf6, primals_1, 
primals_3, buf1, reinterpret_tensor(buf5, (8, ), (1, ), 0), buf7, reinterpret_tensor(buf2, (1, 8, 1, 1), (8, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 2, 3, 3), (18, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class ExpandingBlock(nn.Module): def __init__(self, input_channels, use_bn=True): super(ExpandingBlock, self).__init__() self.conv1 = nn.ConvTranspose2d(input_channels, input_channels // 2, kernel_size=3, stride=2, padding=1, output_padding=1) if use_bn: self.instancenorm = nn.InstanceNorm2d(input_channels // 2) self.use_bn = use_bn self.activation = nn.ReLU() def forward(self, x): x = self.conv1(x) if self.use_bn: x = self.instancenorm(x) x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4}]
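A quick shape sketch for the block above: the stride-2 transposed convolution (kernel 3, padding 1, output_padding 1) doubles the spatial size while halving the channel count.

import torch

blk = ExpandingBlock(input_channels=4)
out = blk(torch.rand(4, 4, 4, 4))
# H_out = (H - 1) * 2 - 2 * 1 + 3 + 1 = 2 * H
assert out.shape == (4, 2, 8, 8)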
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_0( in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 8 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + (r2 + 64 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 64.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp26 = tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = 0.0 tmp29 = tmp27 <= tmp28 tl.store(in_out_ptr0 + (r2 + 64 * x3), tmp2, xmask) tl.store(out_ptr2 + (r2 + 64 * x3), tmp27, xmask) tl.store(out_ptr3 + (r2 + 64 * x3), tmp29, xmask) tl.store(out_ptr4 + x3, tmp24, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 8, 8), (128, 64, 8, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) buf6 = empty_strided_cuda((4, 2, 8, 8), (128, 64, 8, 1), torch.float32) buf7 = empty_strided_cuda((4, 2, 8, 8), (128, 64, 8, 1), torch.bool) buf5 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_0[ grid(8)](buf1, primals_2, buf2, buf6, buf7, buf5, 8, 64, XBLOCK =8, num_warps=4, num_stages=1) del primals_2 return buf6, primals_1, primals_3, buf1, reinterpret_tensor(buf5, (8,), (1,), 0), buf7, reinterpret_tensor(buf2, (1, 8, 1, 1), (8, 1, 1, 1), 0) class ExpandingBlockNew(nn.Module): def __init__(self, input_channels, use_bn=True): super(ExpandingBlockNew, self).__init__() self.conv1 = nn.ConvTranspose2d(input_channels, input_channels // 2, kernel_size=3, stride=2, padding=1, output_padding=1) if use_bn: self.instancenorm = 
nn.InstanceNorm2d(input_channels // 2) self.use_bn = use_bn self.activation = nn.ReLU() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
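A hedged parity sketch, assuming a CUDA device: the weights are copied across so both modules compute the same transposed convolution, instance norm, and ReLU; the tolerance allows for the fused kernel's reduction order.

import torch

eager = ExpandingBlock(4).cuda()
compiled = ExpandingBlockNew(4).cuda()
compiled.load_state_dict(eager.state_dict())  # share conv1 weight and bias
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-5)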
diegushko/CycleGAN
ExpandingBlock
false
12283
[ "MIT" ]
0
630d1cd00cef3f09f036d3c734d31c772cc0a786
https://github.com/diegushko/CycleGAN/tree/630d1cd00cef3f09f036d3c734d31c772cc0a786
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # cross_entropy => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/s5/cs5wshnrdka3xma3btqijhothwpkw4ctmtyvsdzkv6seotnt4jpf.py # Topologically Sorted Source Nodes: [cross_entropy, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean] # Source node to ATen node mapping: # cross_entropy => exp, log, mul, neg, sub_1, sum_1, sum_2 # loss => mul_1 # mean => mean # neg => neg_1 # p => exp_1 # pow_1 => pow_1 # sub => sub_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%neg,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 0.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %neg), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp13 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp20 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp24 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = -tmp27 tmp29 = tl_math.exp(tmp28) tmp30 = 1.0 tmp31 = tmp30 - tmp29 tmp32 = tmp30 * tmp27 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp36 = 64.0 tmp37 = tmp35 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp37, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [cross_entropy, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean] triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1.run(buf3, buf0, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del buf0 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), 
device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.nn.functional as F def focal_loss(input_values, gamma): """Computes the focal loss""" p = torch.exp(-input_values) loss = (1 - p) ** gamma * input_values return loss.mean() class FocalLoss(nn.Module): def __init__(self, weight=None, gamma=0.0): super(FocalLoss, self).__init__() assert gamma >= 0 self.gamma = gamma self.weight = weight def forward(self, input, target): return focal_loss(F.cross_entropy(input, target, reduction='none', weight=self.weight), self.gamma) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
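A small sanity sketch for the module above: with gamma=0 the modulating factor (1 - p) ** gamma is identically 1, so the focal loss reduces to the mean of the unreduced cross-entropy. Probability (soft) targets like those from get_inputs() require PyTorch >= 1.10.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 4, 4, 4)
target = torch.softmax(torch.randn(4, 4, 4, 4), dim=1)  # soft class probabilities
fl = FocalLoss(gamma=0.0)(logits, target)
ce = F.cross_entropy(logits, target, reduction='none').mean()
assert torch.allclose(fl, ce, atol=1e-6)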
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = -tmp27 tmp29 = tl_math.exp(tmp28) tmp30 = 1.0 tmp30 - tmp29 tmp32 = tmp30 * tmp27 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp36 = 64.0 tmp37 = tmp35 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1[grid(1)]( buf3, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf3, def 
focal_loss(input_values, gamma): """Computes the focal loss""" p = torch.exp(-input_values) loss = (1 - p) ** gamma * input_values return loss.mean() class FocalLossNew(nn.Module): def __init__(self, weight=None, gamma=0.0): super(FocalLossNew, self).__init__() assert gamma >= 0 self.gamma = gamma self.weight = weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
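A hedged smoke test of the generated wrapper, assuming a CUDA device: call() takes the two (4, 4, 4, 4) tensors and returns the fully reduced scalar produced by the fused log-softmax/weighting/mean kernel.

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
loss, = call([a, b])
assert loss.shape == ()  # scalar: the kernel fuses the reduction to the mean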
dixit-dude7/LDAM-DRW
FocalLoss
false
12284
[ "MIT" ]
0
6366f4756d3ac0c6b6db784b7f20e16066967ed4
https://github.com/dixit-dude7/LDAM-DRW/tree/6366f4756d3ac0c6b6db784b7f20e16066967ed4
NormedLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), 
xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xe/cxewggzrfqe57dzglxrzfhfgpsywlh36utvtdulp5oi75wfs7ml3.py # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize_1 => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) 
triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize_1, out], Original ATen: [aten.div, aten.mm] extern_kernels.mm(buf0, buf1, out=buf2) del buf1 return (buf2, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.nn.functional as F from torch.nn import Parameter class NormedLinear(nn.Module): def __init__(self, in_features, out_features): super(NormedLinear, self).__init__() self.weight = Parameter(torch.Tensor(in_features, out_features)) self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0) def forward(self, x): out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0)) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
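A property sketch for the layer above: because the input rows and the weight columns are both L2-normalized before the matmul, every output entry is a cosine similarity and is bounded by 1 in magnitude.

import torch

m = NormedLinear(4, 4)
out = m(torch.rand(4, 4))
assert out.shape == (4, 4)
assert out.abs().max().item() <= 1.0 + 1e-5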
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, buf1, out=buf2) del buf1 return buf2, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0) class NormedLinearNew(nn.Module): def __init__(self, in_features, out_features): super(NormedLinearNew, self).__init__() self.weight = Parameter(torch.Tensor(in_features, out_features)) self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
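A hedged smoke test, assuming a CUDA device: the two Triton kernels normalize one operand per row and the other per column, and the matmul is deferred to extern_kernels.mm, so the cosine bound still holds on the output.

import torch

w = torch.rand(4, 4, device='cuda')
x = torch.rand(4, 4, device='cuda')
out, *_ = call([w, x])  # call also returns tensors saved for backward
assert out.shape == (4, 4)
assert out.abs().max().item() <= 1.0 + 1e-5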
dixit-dude7/LDAM-DRW
NormedLinear
false
12285
[ "MIT" ]
0
6366f4756d3ac0c6b6db784b7f20e16066967ed4
https://github.com/dixit-dude7/LDAM-DRW/tree/6366f4756d3ac0c6b6db784b7f20e16066967ed4
Warp
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/f6/cf6lkg63q5feozbqgyxlaojyvgoj2ib3uzt3f7wr6x4ysaaewpgx.py # Topologically Sorted Source Nodes: [repeat, grid, mul, truediv, sub, setitem], Original ATen: [aten.repeat, aten.add, aten.mul, aten.div, aten.sub, aten.copy] # Source node to ATen node mapping: # grid => add # mul => mul # repeat => repeat # setitem => copy # sub => sub # truediv => div # Graph fragment: # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [4, 1, 1, 1]), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%repeat, %arg0_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 2.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 3), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, 1.0), kwargs = {}) # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_1, %sub), kwargs = {}) # %select_scatter_default : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%add, %copy, 1, 0), kwargs = {}) triton_poi_fused_add_copy_div_mul_repeat_sub_0 = async_compile.triton('triton_poi_fused_add_copy_div_mul_repeat_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_copy_div_mul_repeat_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_copy_div_mul_repeat_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) % 2 x1 = (xindex // 4) % 4 x0 = xindex % 4 x3 = (xindex // 32) x5 = xindex % 16 x6 = (xindex // 4) % 8 x7 = xindex tmp19 = tl.load(in_ptr0 + (x5 + (32*x3)), xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr0 + (x7), xmask) tmp0 = x2 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = x1 tmp4 = tl.full([1], 0, tl.int64) tmp5 = tmp3 >= tmp4 tmp6 = tl.full([1], 4, tl.int64) tmp7 = tmp3 < tmp6 tmp8 = x0 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 >= tmp6 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp3 < tmp12 tmp14 = (-4) + x1 tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp11, tmp14, tmp15) tmp17 = tl.where(tmp7, tmp10, tmp16) tmp18 = tmp17.to(tl.float32) tmp20 = tmp18 + tmp19 tmp21 = 2.0 tmp22 = tmp20 * tmp21 tmp23 = 0.3333333333333333 tmp24 = tmp22 * tmp23 tmp25 = 1.0 tmp26 = tmp24 - tmp25 tmp27 = x6 tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp6 tmp30 = tl.where(tmp29, tmp8, tmp9) tmp31 = tmp27 >= tmp6 tmp32 = tmp27 < tmp12 tmp33 = (-4) + x1 + (4*x2) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp31, tmp33, tmp34) tmp36 = tl.where(tmp29, tmp30, tmp35) tmp37 = tmp36.to(tl.float32) tmp39 = tmp37 + tmp38 tmp40 = tl.where(tmp2, tmp26, tmp39) tl.store(out_ptr0 + (x7), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hk/chkc76o45uaqvv7aygjjxvrhjfcfgzsvamt5pmidxcvte5ajxqfy.py # Topologically Sorted Source Nodes: [out, mask, mask_1, gt, mask_2, mul_2], Original ATen: [aten.grid_sampler_2d, aten.ones, aten.gt, aten._to_copy, aten.mul] # Source node to ATen node mapping: # gt => gt # mask => full_default_12 # mask_1 => add_10, add_11, add_12, add_13, add_14, add_8, add_9, convert_element_type_10, convert_element_type_16, convert_element_type_9, floor_2, floor_3, full_default_13, full_default_14, full_default_15, full_default_18, full_default_21, full_default_23, full_default_24, ge_10, ge_11, ge_12, ge_13, ge_14, ge_15, ge_8, ge_9, index_4, index_5, index_6, index_7, logical_and_12, logical_and_13, logical_and_14, logical_and_15, logical_and_16, logical_and_17, logical_and_18, logical_and_19, logical_and_20, logical_and_21, logical_and_22, logical_and_23, lt_10, lt_11, lt_12, lt_13, lt_14, lt_15, lt_8, lt_9, mul_12, mul_13, mul_14, mul_15, mul_16, mul_17, mul_18, mul_19, mul_20, mul_21, sub_10, sub_11, sub_12, sub_13, sub_14, sub_15, sub_16, sub_17, where_12, where_13, where_14, where_17, where_20, where_22, where_23 # mask_2 => convert_element_type_17 # mul_2 => mul_22 # out => add_1, add_2, add_3, add_4, add_5, add_6, add_7, convert_element_type_1, convert_element_type_2, convert_element_type_8, floor, floor_1, full_default, full_default_1, full_default_10, full_default_11, full_default_2, full_default_5, full_default_8, ge, ge_1, ge_2, 
ge_3, ge_4, ge_5, ge_6, ge_7, index, index_1, index_2, index_3, logical_and, logical_and_1, logical_and_10, logical_and_11, logical_and_2, logical_and_3, logical_and_4, logical_and_5, logical_and_6, logical_and_7, logical_and_8, logical_and_9, lt, lt_1, lt_2, lt_3, lt_4, lt_5, lt_6, lt_7, mul_10, mul_11, mul_2, mul_3, mul_4, mul_5, mul_6, mul_7, mul_8, mul_9, sub_2, sub_3, sub_4, sub_5, sub_6, sub_7, sub_8, sub_9, where, where_1, where_10, where_11, where_2, where_5, where_8 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, 2.0), kwargs = {}) # %add_1 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 1.5), kwargs = {}) # %floor : [num_users=9] = call_function[target=torch.ops.aten.floor.default](args = (%add_1,), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor, 0), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor, 4), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_9, 2.0), kwargs = {}) # %add_2 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1.5), kwargs = {}) # %floor_1 : [num_users=9] = call_function[target=torch.ops.aten.floor.default](args = (%add_2,), kwargs = {}) # %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor_1, 0), kwargs = {}) # %lt_1 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor_1, 4), kwargs = {}) # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_1, %lt_1), kwargs = {}) # %logical_and_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt, %logical_and), kwargs = {}) # %logical_and_2 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %logical_and_1), kwargs = {}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%floor_1, torch.int64), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_2, %convert_element_type_2, %full_default_1), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%floor, torch.int64), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_2, %convert_element_type_1, %full_default), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [%view_4, %view_5, %where_1, %where]), kwargs = {}) # %add_3 : [num_users=8] = call_function[target=torch.ops.aten.add.Tensor](args = (%floor, 1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_1), kwargs = {}) # %add_4 : [num_users=8] = call_function[target=torch.ops.aten.add.Tensor](args = (%floor_1, 1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, 
%add_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %sub_3), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_2, %mul_4, %full_default_2), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %where_2), kwargs = {}) # %ge_2 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_3, 0), kwargs = {}) # %lt_2 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_3, 4), kwargs = {}) # %ge_3 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor_1, 0), kwargs = {}) # %lt_3 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor_1, 4), kwargs = {}) # %logical_and_3 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_3, %lt_3), kwargs = {}) # %logical_and_4 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_2, %logical_and_3), kwargs = {}) # %logical_and_5 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_2, %logical_and_4), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [%view_4, %view_5, %where_4, %where_3]), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %floor), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %add_2), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %sub_5), kwargs = {}) # %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_5, %mul_5, %full_default_5), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_1, %where_5), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, %mul_9), kwargs = {}) # %ge_4 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor, 0), kwargs = {}) # %lt_4 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor, 4), kwargs = {}) # %ge_5 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_4, 0), kwargs = {}) # %lt_5 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_4, 4), kwargs = {}) # %logical_and_6 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_5, %lt_5), kwargs = {}) # %logical_and_7 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_4, %logical_and_6), kwargs = {}) # %logical_and_8 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_4, %logical_and_7), kwargs = {}) # %index_2 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [%view_4, %view_5, %where_7, %where_6]), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_1), kwargs = 
{}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %floor_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %sub_7), kwargs = {}) # %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_8 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_8, %mul_6, %full_default_8), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_2, %where_8), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_10), kwargs = {}) # %ge_6 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_3, 0), kwargs = {}) # %lt_6 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_3, 4), kwargs = {}) # %ge_7 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_4, 0), kwargs = {}) # %lt_7 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_4, 4), kwargs = {}) # %logical_and_9 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_7, %lt_7), kwargs = {}) # %logical_and_10 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_6, %logical_and_9), kwargs = {}) # %logical_and_11 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_6, %logical_and_10), kwargs = {}) # %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.int64), kwargs = {}) # %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_10 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_11, %convert_element_type_8, %full_default_10), kwargs = {}) # %index_3 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [%view_4, %view_5, %where_10, %where_9]), kwargs = {}) # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %floor), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %floor_1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %sub_9), kwargs = {}) # %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_11 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_11, %mul_7, %full_default_11), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_3, %where_11), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_11), kwargs = {}) # %full_default_12 : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_10, 2.0), kwargs = {}) # %add_8 : [num_users=5] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, 1.5), kwargs = {}) # %floor_2 : [num_users=9] = call_function[target=torch.ops.aten.floor.default](args = (%add_8,), kwargs = {}) # %ge_8 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor_2, 0), kwargs = {}) # %lt_8 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor_2, 4), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_11, 2.0), kwargs = {}) # %add_9 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_13, 1.5), kwargs = {}) # %floor_3 : [num_users=9] = call_function[target=torch.ops.aten.floor.default](args = (%add_9,), kwargs = {}) # %ge_9 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor_3, 0), kwargs = {}) # %lt_9 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor_3, 4), kwargs = {}) # %logical_and_12 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_9, %lt_9), kwargs = {}) # %logical_and_13 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_8, %logical_and_12), kwargs = {}) # %logical_and_14 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_8, %logical_and_13), kwargs = {}) # %convert_element_type_10 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%floor_3, torch.int64), kwargs = {}) # %full_default_14 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_13 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_14, %convert_element_type_10, %full_default_14), kwargs = {}) # %convert_element_type_9 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%floor_2, torch.int64), kwargs = {}) # %full_default_13 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_14, %convert_element_type_9, %full_default_13), kwargs = {}) # %index_4 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%full_default_12, [%view_19, %view_20, %where_13, %where_12]), kwargs = {}) # %add_10 : [num_users=8] = call_function[target=torch.ops.aten.add.Tensor](args = (%floor_2, 1), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_10, %add_8), kwargs = {}) # %add_11 : [num_users=8] = call_function[target=torch.ops.aten.add.Tensor](args = (%floor_3, 1), kwargs = {}) # %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_11, %add_9), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %sub_11), kwargs = {}) # %full_default_15 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_14 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_14, %mul_14, %full_default_15), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_4, 
%where_14), kwargs = {}) # %ge_10 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_10, 0), kwargs = {}) # %lt_10 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_10, 4), kwargs = {}) # %ge_11 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor_3, 0), kwargs = {}) # %lt_11 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor_3, 4), kwargs = {}) # %logical_and_15 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_11, %lt_11), kwargs = {}) # %logical_and_16 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_10, %logical_and_15), kwargs = {}) # %logical_and_17 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_10, %logical_and_16), kwargs = {}) # %index_5 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%full_default_12, [%view_19, %view_20, %where_16, %where_15]), kwargs = {}) # %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %floor_2), kwargs = {}) # %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_11, %add_9), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %sub_13), kwargs = {}) # %full_default_18 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_17 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_17, %mul_15, %full_default_18), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_5, %where_17), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_18, %mul_19), kwargs = {}) # %ge_12 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor_2, 0), kwargs = {}) # %lt_12 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor_2, 4), kwargs = {}) # %ge_13 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_11, 0), kwargs = {}) # %lt_13 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_11, 4), kwargs = {}) # %logical_and_18 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_13, %lt_13), kwargs = {}) # %logical_and_19 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_12, %logical_and_18), kwargs = {}) # %logical_and_20 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_12, %logical_and_19), kwargs = {}) # %index_6 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%full_default_12, [%view_19, %view_20, %where_19, %where_18]), kwargs = {}) # %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_10, %add_8), kwargs = {}) # %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_9, %floor_3), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %sub_15), kwargs = {}) # %full_default_21 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_20 : [num_users=1] = 
call_function[target=torch.ops.aten.where.self](args = (%logical_and_20, %mul_16, %full_default_21), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_6, %where_20), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %mul_20), kwargs = {}) # %ge_14 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_10, 0), kwargs = {}) # %lt_14 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_10, 4), kwargs = {}) # %ge_15 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_11, 0), kwargs = {}) # %lt_15 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_11, 4), kwargs = {}) # %logical_and_21 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_15, %lt_15), kwargs = {}) # %logical_and_22 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_14, %logical_and_21), kwargs = {}) # %logical_and_23 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_14, %logical_and_22), kwargs = {}) # %convert_element_type_16 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_11, torch.int64), kwargs = {}) # %full_default_23 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_22 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_23, %convert_element_type_16, %full_default_23), kwargs = {}) # %index_7 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%full_default_12, [%view_19, %view_20, %where_22, %where_21]), kwargs = {}) # %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %floor_2), kwargs = {}) # %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_9, %floor_3), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %sub_17), kwargs = {}) # %full_default_24 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_23 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_23, %mul_17, %full_default_24), kwargs = {}) # %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_7, %where_23), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, %mul_21), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_14, 0.9999), kwargs = {}) # %convert_element_type_17 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {}) # %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %convert_element_type_17), kwargs = {}) triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1 = async_compile.triton('triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, 
math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1', 'mutated_arg_names': ['in_out_ptr5'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1(in_out_ptr5, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex x4 = (xindex // 16) tmp3 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 0, tl.int32) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = 0.3333333333333333 tmp7 = tmp5 * tmp6 tmp8 = 1.0 tmp9 = tmp7 - tmp8 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp11 * tmp4 tmp13 = 1.5 tmp14 = tmp12 + tmp13 tmp15 = libdevice.floor(tmp14) tmp16 = tmp15 + tmp8 tmp17 = 4.0 tmp18 = tmp16 < tmp17 tmp19 = tmp1 == tmp1 tmp20 = tl.where(tmp19, tmp9, tmp3) tmp21 = tmp20 * tmp4 tmp22 = tmp21 + tmp13 tmp23 = libdevice.floor(tmp22) tmp24 = tmp23 + tmp8 tmp25 = 0.0 tmp26 = tmp24 >= tmp25 tmp27 = tmp24 < tmp17 tmp28 = tmp26 & tmp27 tmp29 = tmp18 & tmp28 tmp30 = tmp15 >= tmp25 tmp31 = tmp15 < tmp17 tmp32 = tmp31 & tmp28 tmp33 = tmp30 & tmp32 tmp34 = tmp16 >= tmp25 tmp35 = tmp23 >= tmp25 tmp36 = tmp23 < tmp17 tmp37 = tmp35 & tmp36 tmp38 = tmp18 & tmp37 tmp39 = tmp34 & tmp38 tmp40 = tmp31 & tmp37 tmp41 = tmp30 & tmp40 tmp42 = tmp16 - tmp14 tmp43 = tmp24 - tmp22 tmp44 = tmp42 * tmp43 tmp45 = tl.where(tmp41, tmp44, tmp25) tmp46 = tmp23.to(tl.int64) tmp47 = tl.full([1], 0, tl.int64) tmp48 = tl.where(tmp39, tmp46, tmp47) tmp49 = tl.full([XBLOCK], 4, tl.int32) tmp50 = tmp48 + tmp49 tmp51 = tmp48 < 0 tmp52 = tl.where(tmp51, tmp50, tmp48) tl.device_assert(((0 <= tmp52) & (tmp52 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp52 < 4") tmp54 = tmp16.to(tl.int64) tmp55 = tl.where(tmp39, tmp54, tmp47) tmp56 = tmp55 + tmp49 tmp57 = tmp55 < 0 tmp58 = tl.where(tmp57, tmp56, tmp55) tl.device_assert(((0 <= tmp58) & (tmp58 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp58 < 4") tmp60 = tmp14 - tmp15 tmp61 = tmp60 * tmp43 tmp62 = tl.where(tmp39, tmp61, tmp25) tmp63 = tmp8 * tmp62 tmp64 = tmp24.to(tl.int64) tmp65 = tl.where(tmp33, tmp64, tmp47) tmp66 = tmp65 + tmp49 tmp67 = tmp65 < 0 tmp68 = tl.where(tmp67, tmp66, tmp65) tl.device_assert(((0 <= tmp68) & (tmp68 < 4)) | ~(xmask), "index out of 
bounds: 0 <= tmp68 < 4") tmp70 = tmp15.to(tl.int64) tmp71 = tl.where(tmp33, tmp70, tmp47) tmp72 = tmp71 + tmp49 tmp73 = tmp71 < 0 tmp74 = tl.where(tmp73, tmp72, tmp71) tl.device_assert(((0 <= tmp74) & (tmp74 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp74 < 4") tmp76 = tmp22 - tmp23 tmp77 = tmp42 * tmp76 tmp78 = tl.where(tmp33, tmp77, tmp25) tmp79 = tmp8 * tmp78 tmp80 = tmp34 & tmp29 tmp81 = tmp60 * tmp76 tmp82 = tl.where(tmp80, tmp81, tmp25) tmp83 = tl.where(tmp41, tmp46, tmp47) tmp84 = tl.where(tmp41, tmp70, tmp47) tmp85 = tl.where(tmp80, tmp64, tmp47) tmp86 = tmp83 + tmp49 tmp87 = tmp83 < 0 tmp88 = tl.where(tmp87, tmp86, tmp83) tl.device_assert(((0 <= tmp88) & (tmp88 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp88 < 4") tmp90 = tmp84 + tmp49 tmp91 = tmp84 < 0 tmp92 = tl.where(tmp91, tmp90, tmp84) tl.device_assert(((0 <= tmp92) & (tmp92 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp92 < 4") tmp94 = tmp8 * tmp45 tmp95 = tmp94 + tmp63 tmp96 = tmp95 + tmp79 tmp97 = tmp85 + tmp49 tmp98 = tmp85 < 0 tmp99 = tl.where(tmp98, tmp97, tmp85) tl.device_assert(((0 <= tmp99) & (tmp99 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp99 < 4") tmp101 = tl.where(tmp80, tmp54, tmp47) tmp102 = tmp101 + tmp49 tmp103 = tmp101 < 0 tmp104 = tl.where(tmp103, tmp102, tmp101) tl.device_assert(((0 <= tmp104) & (tmp104 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp104 < 4") tmp106 = tmp8 * tmp82 tmp107 = tmp96 + tmp106 tmp108 = tl.load(in_ptr1 + (tmp58 + (4*tmp52) + (16*x4)), xmask, eviction_policy='evict_last') tmp109 = tmp108 * tmp62 tmp110 = tl.load(in_ptr1 + (tmp74 + (4*tmp68) + (16*x4)), xmask, eviction_policy='evict_last') tmp111 = tmp110 * tmp78 tmp112 = tl.load(in_ptr1 + (tmp104 + (4*tmp99) + (16*x4)), xmask, eviction_policy='evict_last') tmp113 = tmp112 * tmp82 tmp114 = tl.load(in_ptr1 + (tmp92 + (4*tmp88) + (16*x4)), xmask, eviction_policy='evict_last') tmp115 = tmp114 * tmp45 tmp116 = tmp115 + tmp109 tmp117 = tmp116 + tmp111 tmp118 = tmp117 + tmp113 tmp119 = 0.9999 tmp120 = tmp107 > tmp119 tmp121 = tmp120.to(tl.float32) tmp122 = tmp118 * tmp121 tl.store(in_out_ptr5 + (x3), tmp122, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [repeat, grid, mul, truediv, sub, setitem], Original ATen: [aten.repeat, aten.add, aten.mul, aten.div, aten.sub, aten.copy] stream0 = get_raw_stream(0) triton_poi_fused_add_copy_div_mul_repeat_sub_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0) del arg0_1 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf12; del buf12 # reuse buf27 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [out, mask, mask_1, gt, mask_2, mul_2], Original ATen: [aten.grid_sampler_2d, aten.ones, aten.gt, aten._to_copy, aten.mul] triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1.run(buf27, buf0, arg1_1, 256, grid=grid(256), stream=stream0) del arg1_1 del buf0 return (buf27, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
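The fused kernels above hard-code the grid_sample unnormalization for a 4x4 input with align_corners=False: a normalized coordinate g in [-1, 1] maps to the pixel coordinate ((g + 1) * W - 1) / 2 = 2 * g + 1.5 for W = 4, which is where the literals 2.0 and 1.5 come from, and the coords_grid scaling by 2 / max(W - 1, 1) = 2/3 shows up as the pair of factors 2.0 and 0.3333333333333333. A minimal sanity check of that arithmetic in eager PyTorch (no Triton needed; shapes are the ones asserted in call above):

import torch

W = 4
g = torch.linspace(-1, 1, steps=5)      # normalized grid coordinates
px = ((g + 1) * W - 1) / 2              # align_corners=False unnormalization
assert torch.allclose(px, 2 * g + 1.5)  # matches the 2.0 / 1.5 literals in the kernel
assert abs(2.0 * 0.3333333333333333 - 2.0 / max(W - 1, 1)) < 1e-12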
import torch from torch import Tensor import torch.nn as nn import torch.nn.functional as F def coords_grid(flow: 'Tensor') ->Tensor: """Generate a shifted coordinate grid based on the input flow. Args: flow (Tensor): Estimated optical flow. Returns: Tensor: The coordinates shifted by the input flow and scaled to the range [-1, 1]. """ B, _, H, W = flow.shape xx = torch.arange(0, W, device=flow.device, requires_grad=False) yy = torch.arange(0, H, device=flow.device, requires_grad=False) coords = torch.meshgrid(yy, xx) coords = torch.stack(coords[::-1], dim=0).float() grid = coords[None].repeat(B, 1, 1, 1) + flow grid[:, 0, ...] = grid[:, 0, ...] * 2.0 / max(W - 1, 1) - 1.0 grid[:, 1, ...] = grid[:, 1, ...] * 2.0 / max(H - 1, 1) - 1.0 grid = grid.permute(0, 2, 3, 1) return grid class Warp(nn.Module): """Warping layer to warp feature using optical flow. Args: mode (str): interpolation mode to calculate output values. Options are 'bilinear' and 'nearest'. Defaults to 'bilinear'. padding_mode (str): padding mode for outside grid values. Options are 'zeros', 'border' and 'reflection'. Defaults to 'zeros'. align_corners (bool): If set to True, the extrema (-1 and 1) are considered as referring to the center points of the input’s corner pixels. If set to False, they are instead considered as referring to the corner points of the input’s corner pixels, making the sampling more resolution agnostic. Defaults to False. """ def __init__(self, mode: 'str'='bilinear', padding_mode: 'str'='zeros', align_corners: 'bool'=False, use_mask: 'bool'=True) ->None: super().__init__() self.mode = mode self.padding_mode = padding_mode self.align_corners = align_corners self.use_mask = use_mask def forward(self, feat: 'Tensor', flow: 'Tensor') ->Tensor: """Forward function for warp. Args: feat (Tensor): Input feature. flow (Tensor): Input optical flow. Returns: Tensor: The output feature generated by warping the input feature based on the input flow. """ grid = coords_grid(flow) out = F.grid_sample(feat, grid, mode=self.mode, padding_mode=self. padding_mode, align_corners=self.align_corners) mask = torch.ones(feat.size(), device=feat.device, requires_grad=False) if self.use_mask: mask = F.grid_sample(mask, grid, mode=self.mode, padding_mode= self.padding_mode, align_corners=self.align_corners) mask = (mask > 0.9999).float() return out * mask def __repr__(self): s = self.__class__.__name__ s += f'(mode={self.mode}, ' s += f'padding_mode={self.padding_mode}, ' s += f'align_corners={self.align_corners}, ' s += f'use_mask={self.use_mask})' return s def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 2, 4, 4])] def get_init_inputs(): return [[], {}]
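For reference, the eager module above can be exercised directly; the shapes below mirror get_inputs() (a (B, C, H, W) feature and a (B, 2, H, W) flow). A minimal usage sketch with arbitrary tensor values:

import torch

feat = torch.rand(4, 4, 4, 4)  # (B, C, H, W) feature map
flow = torch.rand(4, 2, 4, 4)  # (B, 2, H, W) optical flow
warp = Warp()                  # bilinear, zeros padding, use_mask=True
out = warp(feat, flow)         # warped feature, zeroed where sampling left the image
assert out.shape == feat.shape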
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import Tensor import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_copy_div_mul_repeat_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 2 x1 = xindex // 4 % 4 x0 = xindex % 4 x3 = xindex // 32 x5 = xindex % 16 x6 = xindex // 4 % 8 x7 = xindex tmp19 = tl.load(in_ptr0 + (x5 + 32 * x3), xmask, eviction_policy= 'evict_last') tmp38 = tl.load(in_ptr0 + x7, xmask) tmp0 = x2 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = x1 tl.full([1], 0, tl.int64) tmp6 = tl.full([1], 4, tl.int64) tmp7 = tmp3 < tmp6 tmp8 = x0 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 >= tmp6 tl.full([1], 8, tl.int64) tmp14 = -4 + x1 tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp11, tmp14, tmp15) tmp17 = tl.where(tmp7, tmp10, tmp16) tmp18 = tmp17.to(tl.float32) tmp20 = tmp18 + tmp19 tmp21 = 2.0 tmp22 = tmp20 * tmp21 tmp23 = 0.3333333333333333 tmp24 = tmp22 * tmp23 tmp25 = 1.0 tmp26 = tmp24 - tmp25 tmp27 = x6 tmp29 = tmp27 < tmp6 tmp30 = tl.where(tmp29, tmp8, tmp9) tmp31 = tmp27 >= tmp6 tmp33 = -4 + x1 + 4 * x2 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp31, tmp33, tmp34) tmp36 = tl.where(tmp29, tmp30, tmp35) tmp37 = tmp36.to(tl.float32) tmp39 = tmp37 + tmp38 tmp40 = tl.where(tmp2, tmp26, tmp39) tl.store(out_ptr0 + x7, tmp40, xmask) @triton.jit def triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1(in_out_ptr5, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex x4 = xindex // 16 tmp3 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp0 = tl.full([1], 0, tl.int32) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = 0.3333333333333333 tmp7 = tmp5 * tmp6 tmp8 = 1.0 tmp9 = tmp7 - tmp8 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp11 * tmp4 tmp13 = 1.5 tmp14 = tmp12 + tmp13 tmp15 = libdevice.floor(tmp14) tmp16 = tmp15 + tmp8 tmp17 = 4.0 tmp18 = tmp16 < tmp17 tmp19 = tmp1 == tmp1 tmp20 = tl.where(tmp19, tmp9, tmp3) tmp21 = tmp20 * tmp4 tmp22 = tmp21 + tmp13 tmp23 = libdevice.floor(tmp22) tmp24 = tmp23 + tmp8 tmp25 = 0.0 tmp26 = tmp24 >= tmp25 tmp27 = tmp24 < tmp17 tmp28 = tmp26 & tmp27 tmp29 = tmp18 & tmp28 tmp30 = tmp15 >= tmp25 tmp31 = tmp15 < tmp17 tmp32 = tmp31 & tmp28 tmp33 = tmp30 & tmp32 tmp34 = tmp16 >= tmp25 tmp35 = tmp23 >= tmp25 tmp36 = tmp23 < tmp17 tmp37 = tmp35 & tmp36 tmp38 = tmp18 & tmp37 tmp39 = tmp34 & tmp38 tmp40 = tmp31 & tmp37 tmp41 = tmp30 & tmp40 tmp42 = tmp16 - tmp14 tmp43 = tmp24 - tmp22 tmp44 = tmp42 * tmp43 tmp45 = tl.where(tmp41, tmp44, tmp25) tmp46 = tmp23.to(tl.int64) tmp47 = tl.full([1], 0, tl.int64) tmp48 = tl.where(tmp39, tmp46, tmp47) tmp49 = tl.full([XBLOCK], 4, tl.int32) tmp50 = tmp48 + tmp49 tmp51 = tmp48 < 0 tmp52 = tl.where(tmp51, tmp50, tmp48) tl.device_assert((0 
<= tmp52) & (tmp52 < 4) | ~xmask, 'index out of bounds: 0 <= tmp52 < 4') tmp54 = tmp16.to(tl.int64) tmp55 = tl.where(tmp39, tmp54, tmp47) tmp56 = tmp55 + tmp49 tmp57 = tmp55 < 0 tmp58 = tl.where(tmp57, tmp56, tmp55) tl.device_assert((0 <= tmp58) & (tmp58 < 4) | ~xmask, 'index out of bounds: 0 <= tmp58 < 4') tmp60 = tmp14 - tmp15 tmp61 = tmp60 * tmp43 tmp62 = tl.where(tmp39, tmp61, tmp25) tmp63 = tmp8 * tmp62 tmp64 = tmp24.to(tl.int64) tmp65 = tl.where(tmp33, tmp64, tmp47) tmp66 = tmp65 + tmp49 tmp67 = tmp65 < 0 tmp68 = tl.where(tmp67, tmp66, tmp65) tl.device_assert((0 <= tmp68) & (tmp68 < 4) | ~xmask, 'index out of bounds: 0 <= tmp68 < 4') tmp70 = tmp15.to(tl.int64) tmp71 = tl.where(tmp33, tmp70, tmp47) tmp72 = tmp71 + tmp49 tmp73 = tmp71 < 0 tmp74 = tl.where(tmp73, tmp72, tmp71) tl.device_assert((0 <= tmp74) & (tmp74 < 4) | ~xmask, 'index out of bounds: 0 <= tmp74 < 4') tmp76 = tmp22 - tmp23 tmp77 = tmp42 * tmp76 tmp78 = tl.where(tmp33, tmp77, tmp25) tmp79 = tmp8 * tmp78 tmp80 = tmp34 & tmp29 tmp81 = tmp60 * tmp76 tmp82 = tl.where(tmp80, tmp81, tmp25) tmp83 = tl.where(tmp41, tmp46, tmp47) tmp84 = tl.where(tmp41, tmp70, tmp47) tmp85 = tl.where(tmp80, tmp64, tmp47) tmp86 = tmp83 + tmp49 tmp87 = tmp83 < 0 tmp88 = tl.where(tmp87, tmp86, tmp83) tl.device_assert((0 <= tmp88) & (tmp88 < 4) | ~xmask, 'index out of bounds: 0 <= tmp88 < 4') tmp90 = tmp84 + tmp49 tmp91 = tmp84 < 0 tmp92 = tl.where(tmp91, tmp90, tmp84) tl.device_assert((0 <= tmp92) & (tmp92 < 4) | ~xmask, 'index out of bounds: 0 <= tmp92 < 4') tmp94 = tmp8 * tmp45 tmp95 = tmp94 + tmp63 tmp96 = tmp95 + tmp79 tmp97 = tmp85 + tmp49 tmp98 = tmp85 < 0 tmp99 = tl.where(tmp98, tmp97, tmp85) tl.device_assert((0 <= tmp99) & (tmp99 < 4) | ~xmask, 'index out of bounds: 0 <= tmp99 < 4') tmp101 = tl.where(tmp80, tmp54, tmp47) tmp102 = tmp101 + tmp49 tmp103 = tmp101 < 0 tmp104 = tl.where(tmp103, tmp102, tmp101) tl.device_assert((0 <= tmp104) & (tmp104 < 4) | ~xmask, 'index out of bounds: 0 <= tmp104 < 4') tmp106 = tmp8 * tmp82 tmp107 = tmp96 + tmp106 tmp108 = tl.load(in_ptr1 + (tmp58 + 4 * tmp52 + 16 * x4), xmask, eviction_policy='evict_last') tmp109 = tmp108 * tmp62 tmp110 = tl.load(in_ptr1 + (tmp74 + 4 * tmp68 + 16 * x4), xmask, eviction_policy='evict_last') tmp111 = tmp110 * tmp78 tmp112 = tl.load(in_ptr1 + (tmp104 + 4 * tmp99 + 16 * x4), xmask, eviction_policy='evict_last') tmp113 = tmp112 * tmp82 tmp114 = tl.load(in_ptr1 + (tmp92 + 4 * tmp88 + 16 * x4), xmask, eviction_policy='evict_last') tmp115 = tmp114 * tmp45 tmp116 = tmp115 + tmp109 tmp117 = tmp116 + tmp111 tmp118 = tmp117 + tmp113 tmp119 = 0.9999 tmp120 = tmp107 > tmp119 tmp121 = tmp120.to(tl.float32) tmp122 = tmp118 * tmp121 tl.store(in_out_ptr5 + x3, tmp122, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_copy_div_mul_repeat_sub_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf12 del buf12 buf27 = buf13 del buf13 triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1[grid(256)]( buf27, buf0, arg1_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 del buf0 return buf27, def coords_grid(flow: 'Tensor') ->Tensor: """Generate a shifted coordinate grid based on the input flow. 
Args: flow (Tensor): Estimated optical flow. Returns: Tensor: The coordinates shifted by the input flow and scaled to the range [-1, 1]. """ B, _, H, W = flow.shape xx = torch.arange(0, W, device=flow.device, requires_grad=False) yy = torch.arange(0, H, device=flow.device, requires_grad=False) coords = torch.meshgrid(yy, xx) coords = torch.stack(coords[::-1], dim=0).float() grid = coords[None].repeat(B, 1, 1, 1) + flow grid[:, 0, ...] = grid[:, 0, ...] * 2.0 / max(W - 1, 1) - 1.0 grid[:, 1, ...] = grid[:, 1, ...] * 2.0 / max(H - 1, 1) - 1.0 grid = grid.permute(0, 2, 3, 1) return grid class WarpNew(nn.Module): """Warping layer to warp feature using optical flow. Args: mode (str): interpolation mode to calculate output values. Options are 'bilinear' and 'nearest'. Defaults to 'bilinear'. padding_mode (str): padding mode for outside grid values. Options are 'zeros', 'border' and 'reflection'. Defaults to 'zeros'. align_corners (bool): If set to True, the extrema (-1 and 1) are considered as referring to the center points of the input’s corner pixels. If set to False, they are instead considered as referring to the corner points of the input’s corner pixels, making the sampling more resolution agnostic. Defaults to False. """ def __init__(self, mode: 'str'='bilinear', padding_mode: 'str'='zeros', align_corners: 'bool'=False, use_mask: 'bool'=True) ->None: super().__init__() self.mode = mode self.padding_mode = padding_mode self.align_corners = align_corners self.use_mask = use_mask def __repr__(self): s = self.__class__.__name__ s += f'(mode={self.mode}, ' s += f'padding_mode={self.padding_mode}, ' s += f'align_corners={self.align_corners}, ' s += f'use_mask={self.use_mask})' return s def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
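Because every shape, stride, and bound is baked into the kernels (xnumel = 128/256, 4x4 spatial extents, hard-coded strides in assert_size_stride), WarpNew only accepts the exact input sizes above. Under that constraint the compiled path should reproduce the eager module; a hedged equivalence check, assuming a CUDA device and both Warp and WarpNew in scope:

import torch

feat = torch.rand(4, 4, 4, 4, device='cuda')
flow = torch.rand(4, 2, 4, 4, device='cuda')
ref = Warp()(feat, flow)     # eager coords_grid + grid_sample path
out = WarpNew()(feat, flow)  # fused Triton path
assert torch.allclose(ref, out, atol=1e-5)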
dimagrshk/opt_flow_attack
Warp
false
12286
[ "Apache-2.0" ]
0
6bfad92abcf3eaae1a6ca05b865be072361636ed
https://github.com/dimagrshk/opt_flow_attack/tree/6bfad92abcf3eaae1a6ca05b865be072361636ed
Normalize
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/pc/cpcjyun75urr2yzakpnbvk3y55lgrnq5jvzgshllbkpsgoepznsz.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 
(4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = 0.25 tmp17 = libdevice.pow(tmp15, tmp16) tmp18 = 0.0 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = tmp0 / tmp19 tl.store(out_ptr0 + (x2), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
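Reading triton_poi_fused_div_0 above: for each output element it loads the four entries along the contiguous last axis, accumulates (x_i * x_i) * (x_i * x_i) = x_i^4, takes pow(sum, 0.25), clamps via maximum(norm, 0.0), and divides; that is, it normalizes by the 4-norm over the last dimension with an eps of zero. A small eager-mode restatement of the same computation (same operation order as the kernel; values arbitrary):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, 4)
norm = x.pow(2).pow(2).sum(dim=-1, keepdim=True).pow(0.25)  # (sum x^4)^(1/4)
ref = x / norm.clamp_min(0.0)
assert torch.allclose(ref, F.normalize(x, p=4.0, dim=-1, eps=0.0), atol=1e-6)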
import torch from torch import Tensor import torch.nn.parallel import torch.utils.data import torch.nn.functional as F import torch.onnx import torch.optim import torch.utils.data.distributed class Normalize(torch.nn.Module): """Normalize a tensor image with mean and standard deviation. This transform does not support PIL Image. Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],...,std[n])`` for ``n`` channels, this transform will normalize each channel of the input ``torch.*Tensor`` i.e., ``output[channel] = (input[channel] - mean[channel]) / std[channel]`` .. note:: This transform acts out of place, i.e., it does not mutate the input tensor. Args: mean (sequence): Sequence of means for each channel. std (sequence): Sequence of standard deviations for each channel. inplace (bool, optional): Bool to make this operation in-place. """ def __init__(self, mean, std, inplace=False): super().__init__() self.mean = mean self.std = std self.inplace = inplace def forward(self, tensor: 'Tensor') ->Tensor: """ Args: tensor (Tensor): Tensor image to be normalized. Returns: Tensor: Normalized Tensor image. """ return F.normalize(tensor, self.mean, self.std, self.inplace) def __repr__(self): return self.__class__.__name__ + '(mean={0}, std={1})'.format(self. mean, self.std) def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'mean': 4, 'std': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.parallel import torch.utils.data import torch.onnx import torch.optim import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp1 * tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = 0.25 tmp17 = libdevice.pow(tmp15, tmp16) tmp18 = 0.0 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = tmp0 / tmp19 tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(torch.nn.Module): """Normalize a tensor image with mean and standard deviation. This transform does not support PIL Image. Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],...,std[n])`` for ``n`` channels, this transform will normalize each channel of the input ``torch.*Tensor`` i.e., ``output[channel] = (input[channel] - mean[channel]) / std[channel]`` .. note:: This transform acts out of place, i.e., it does not mutate the input tensor. Args: mean (sequence): Sequence of means for each channel. std (sequence): Sequence of standard deviations for each channel. inplace (bool, optional): Bool to make this operation in-place. """ def __init__(self, mean, std, inplace=False): super().__init__() self.mean = mean self.std = std self.inplace = inplace def __repr__(self): return self.__class__.__name__ + '(mean={0}, std={1})'.format(self. mean, self.std) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
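Why a 4-norm rather than the mean/std normalization the docstring describes: forward in the source module calls F.normalize(tensor, self.mean, self.std, self.inplace) with positional arguments, and F.normalize's signature is (input, p=2.0, dim=1, eps=1e-12). With get_init_inputs() supplying mean=4 and std=4, the call resolves to p=4, dim=4 (the last axis of the 5-D input), and eps=False, which behaves as 0.0 (visible as the maximum(tmp17, 0.0) clamp in the kernel), so the generated code is consistent with what the Python actually executes. A sketch of that equivalence, assuming a CUDA device and NormalizeNew in scope:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, 4, device='cuda')
out = NormalizeNew(mean=4, std=4)(x)         # compiled Triton path
ref = F.normalize(x, p=4.0, dim=4, eps=0.0)  # what the positional args resolve to
assert torch.allclose(out, ref, atol=1e-6)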
dineenai/pytorch_untrained_models
Normalize
false
12287
[ "BSD-3-Clause" ]
0
eb301d3b8e3e87b8a79cd8cb4e1cb8d4e44a273a
https://github.com/dineenai/pytorch_untrained_models/tree/eb301d3b8e3e87b8a79cd8cb4e1cb8d4e44a273a
LowRankResidualDecoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/7j/c7jycjp5htd6f5jdvv2i4z3gzdi3nf2c4tjg2ydcvoi5symiidqg.py # Topologically Sorted Source Nodes: [linear], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # linear => convert_element_type_1 # Graph fragment: # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%primals_1, torch.float16), kwargs = {}) triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp16', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/mp/cmpsbcrgyc56gvohxoei4nkltnxe3xirinqdxwxqfej56pgtfkar.py # Topologically Sorted Source Nodes: [linear], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # linear => convert_element_type # Graph fragment: # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%primals_2, torch.float16), kwargs = {}) triton_poi_fused__to_copy_1 = async_compile.triton('triton_poi_fused__to_copy_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp16', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6r/c6rxu6byphv4un7ktkqpdpp2mtv6jdosbhgxup6c76zy3yqtvxek.py # Topologically Sorted Source Nodes: [truediv, attn], Original ATen: [aten.div, aten.clone] # Source node to ATen node mapping: # attn => clone # truediv => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_9, 2.0), kwargs = {}) # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_div_2 = async_compile.triton('triton_poi_fused_clone_div_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp16', 1: '*fp16', 2: '*fp16', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x4), xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + (x4), xmask).to(tl.float32) tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5z/c5zdit3b3gncrtrhksz3wugrwokfwhxiktdrpfc4lyxhd765qkvw.py # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone] # Source node to ATen node mapping: # attn => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp16', 1: '*fp16', 2: '*fp16', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask).to(tl.float32) tmp1 = tl.load(in_ptr1 + (x2 + (16*y3)), xmask & ymask).to(tl.float32) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jx/cjxv75hajxx5elwieb4njn6zfg5rafhoeq5rerbjaunnsbndxs4d.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, convert_element_type_35, exp, sub # Graph fragment: # %convert_element_type_35 : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_23, torch.float32), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%convert_element_type_35, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convert_element_type_35, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp16', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.float32) tmp2 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last').to(tl.float32) tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.float32) tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.float32) tmp10 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.float32) tmp1 = tmp0.to(tl.float32) tmp3 = tmp2.to(tl.float32) tmp5 = tmp4.to(tl.float32) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7.to(tl.float32) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10.to(tl.float32) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 
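    # tmp12 is the row-wise max of the four fp32 logits; subtracting it before
    # exp() is the standard numerically stable softmax shift (the shift cancels
    # in the normalization done by triton_poi_fused__softmax__to_copy_5 below).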
tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/og/cogep24zfncw4nygiwn2xcmmqm7rt7wgmgue4s4uba3a22dqrr3i.py # Topologically Sorted Source Nodes: [softmax, output], Original ATen: [aten._softmax, aten._to_copy] # Source node to ATen node mapping: # output => convert_element_type_36 # softmax => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %convert_element_type_36 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%div_1, torch.float16), kwargs = {}) triton_poi_fused__softmax__to_copy_5 = async_compile.triton('triton_poi_fused__softmax__to_copy_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp16', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax__to_copy_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax__to_copy_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/i5/ci5jnti4ulig4bvrenra5rcts24yk5uwrlszn2zg6dssexq53tnh.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] # Source node to ATen node mapping: # output => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = 
async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp16', 1: '*fp16', 2: '*fp16', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x4), xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + (x4), xmask).to(tl.float32) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/eq/ceqs6zn4xlihole7trexitgzzhcvid5u455d54zikujqk26j5mxb.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_13,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp16', 1: '*fp16', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask).to(tl.float32) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rh/crho5uemcsgmoawdcwbuvpityj4ij5dq3my3ikoy5nffsgeu43hy.py # Topologically Sorted Source Nodes: [linear_10], Original ATen: [aten._to_copy, aten.t] # Source node to ATen node mapping: # linear_10 => convert_element_type_42, permute_15 # Graph fragment: # %convert_element_type_42 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%primals_12, torch.float16), kwargs = {}) # %permute_15 : [num_users=2] = call_function[target=torch.ops.aten.permute.default](args = (%convert_element_type_42, [1, 0]), kwargs = {}) triton_poi_fused__to_copy_t_8 = async_compile.triton('triton_poi_fused__to_copy_t_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp16', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_t_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_t_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/dd/cdd44gygumakzbi7zyhnhbcrkskfgrf3ltivpidu4dadautrbrgl.py # Topologically Sorted Source Nodes: [add_3, q_4], Original ATen: [aten.add] # Source node to ATen node mapping: # add_3 => add_3 # q_4 => add_4 # Graph fragment: # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_31, %view_33), kwargs 
= {}) # %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_1), kwargs = {}) triton_poi_fused_add_9 = async_compile.triton('triton_poi_fused_add_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp16', 1: '*fp16', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + (x0), xmask).to(tl.float32) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp3 = tmp2.to(tl.float32) tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/in/cinndmzytbittomxphtrtjvi2ocrspv6cyphmacpq5bjpvueh5mk.py # Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # q_5 => add_5, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_4, [2]), kwargs = {correction: 0, keepdim: True}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {}) triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/d2/cd2v4afwnnqdrgticluj3xnb55jjthl3xmxanzaiyi5j6flkwydd.py # Topologically Sorted Source Nodes: [q_5, linear_12], Original ATen: [aten.native_layer_norm, aten._to_copy] # Source node to ATen node mapping: # linear_12 => convert_element_type_51 # q_5 => add_5, add_6, mul, mul_1, rsqrt, sub_1, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_4, [2]), kwargs = {correction: 0, keepdim: True}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_14), kwargs = {}) # %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_15), kwargs = {}) # %convert_element_type_51 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_6, torch.float16), kwargs = {}) triton_poi_fused__to_copy_native_layer_norm_11 = async_compile.triton('triton_poi_fused__to_copy_native_layer_norm_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp16', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_native_layer_norm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/a3/ca3xpmgnwxehx4lsbwydaxsjjph6wewmw4npvhjfjzw4fbjfa5na.py # Topologically Sorted Source Nodes: [add_7, q_10], Original ATen: [aten.add] # Source node to ATen node mapping: # add_7 => add_10 # q_10 => add_11 # Graph fragment: # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_65, %view_67), kwargs = {}) # %add_11 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %add_6), kwargs = {}) triton_poi_fused_add_12 = async_compile.triton('triton_poi_fused_add_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp16', 2: '*fp16', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + (x0), xmask).to(tl.float32) tmp4 = tl.load(in_out_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp3 = tmp2.to(tl.float32) tmp5 = tmp3 + tmp4 tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ri/crikeb5achyrmejkbc7gtdrfhlofmjpa4ooqia3p5wyds3r2imju.py # Topologically Sorted Source Nodes: [add_8, x], Original ATen: [aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # add_8 => add_14 # x => relu # Graph fragment: # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_71, %view_73), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_14,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_13 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp16', 1: '*fp16', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_13(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask).to(tl.float32) tmp1 = tl.load(in_ptr0 + (x2), xmask).to(tl.float32) tmp2 
= tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp2.to(tl.float32) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = 0.0 tmp9 = tmp7 <= tmp8 tl.store(in_out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ho/cho4ldhu4kjyosnsatzw4kqe74mefqu73e35u6yjprt3t2ri2drq.py # Topologically Sorted Source Nodes: [x_1, x_3], Original ATen: [aten.add] # Source node to ATen node mapping: # x_1 => add_15 # x_3 => add_16 # Graph fragment: # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_77, %view_79), kwargs = {}) # %add_16 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_15, %add_13), kwargs = {}) triton_poi_fused_add_14 = async_compile.triton('triton_poi_fused_add_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp16', 2: '*fp16', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + (x2), xmask).to(tl.float32) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_out_ptr0 + (x2), xmask) tmp3 = tmp2.to(tl.float32) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp6 = tmp5.to(tl.float32) tmp8 = tmp6 + tmp7 tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/co/ccohfcblbz7sldsujk5p4eikaqtkusjwfpexqfab6dyw5j34kb7p.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_4 => add_17, add_18, mul_4, mul_5, rsqrt_2, sub_4, var_mean_2 # Graph fragment: # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_16, [2]), kwargs = {correction: 0, keepdim: True}) # %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-06), 
kwargs = {}) # %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_17,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_16, %getitem_5), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %rsqrt_2), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_39), kwargs = {}) # %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_40), kwargs = {}) triton_poi_fused_native_layer_norm_15 = async_compile.triton('triton_poi_fused_native_layer_norm_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) 
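    # Each assert_size_stride guard pins a parameter to the exact shape/stride
    # layout the fused kernels were specialized for; a tensor with any other
    # layout would be indexed incorrectly, so the wrapper fails fast instead.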
assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (1, 16), (16, 1)) assert_size_stride(primals_12, (4, 1), (1, 1)) assert_size_stride(primals_13, (4, 16), (16, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (16, 4), (4, 1)) assert_size_stride(primals_19, (16, 4), (4, 1)) assert_size_stride(primals_20, (4, 4), (4, 1)) assert_size_stride(primals_21, (16, 4), (4, 1)) assert_size_stride(primals_22, (16, 4), (4, 1)) assert_size_stride(primals_23, (4, 4), (4, 1)) assert_size_stride(primals_24, (16, 4), (4, 1)) assert_size_stride(primals_25, (16, 4), (4, 1)) assert_size_stride(primals_26, (1, 16), (16, 1)) assert_size_stride(primals_27, (4, 1), (1, 1)) assert_size_stride(primals_28, (4, 16), (16, 1)) assert_size_stride(primals_29, (4, ), (1, )) assert_size_stride(primals_30, (4, ), (1, )) assert_size_stride(primals_31, (1, 4), (4, 1)) assert_size_stride(primals_32, (4, 1), (1, 1)) assert_size_stride(primals_33, (4, 4), (4, 1)) assert_size_stride(primals_34, (4, ), (1, )) assert_size_stride(primals_35, (1, 4), (4, 1)) assert_size_stride(primals_36, (4, 1), (1, 1)) assert_size_stride(primals_37, (4, 4), (4, 1)) assert_size_stride(primals_38, (4, ), (1, )) assert_size_stride(primals_39, (4, ), (1, )) assert_size_stride(primals_40, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten._to_copy] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0) del primals_2 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_3, buf3, 64, grid=grid(64), stream=stream0) del primals_3 buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(buf3, (4, 16), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_4, buf5, 64, grid=grid(64), stream=stream0) del primals_4 buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 16), (1, 4), 0), out=buf6) buf7 = buf1; del buf1 # reuse 
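        # "del ...; # reuse" frees a dead fp16 scratch buffer and recycles its
        # storage for the next projection of matching size instead of
        # allocating anew, keeping peak memory flat across the repeated
        # _to_copy + mm pattern.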
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_1.run(primals_5, buf7, 16, grid=grid(16), stream=stream0) del primals_5 buf8 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf7, (4, 4), (1, 4), 0), out=buf8) buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_6, buf9, 64, grid=grid(64), stream=stream0) del primals_6 buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.mm] extern_kernels.mm(buf8, reinterpret_tensor(buf9, (4, 16), (1, 4), 0), out=buf10) buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_7, buf11, 64, grid=grid(64), stream=stream0) del primals_7 buf12 = empty_strided_cuda((16, 16), (16, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf11, (4, 16), (1, 4), 0), out=buf12) buf13 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_1.run(primals_8, buf13, 16, grid=grid(16), stream=stream0) del primals_8 buf14 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf13, (4, 4), (1, 4), 0), out=buf14) buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_7], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_9, buf15, 64, grid=grid(64), stream=stream0) del primals_9 buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_7], Original ATen: [aten.mm] extern_kernels.mm(buf14, reinterpret_tensor(buf15, (4, 16), (1, 4), 0), out=buf16) buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_8], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_10, buf17, 64, grid=grid(64), stream=stream0) del primals_10 buf18 = empty_strided_cuda((16, 16), (16, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_8], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf17, (4, 16), (1, 4), 0), out=buf18) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) # Topologically Sorted Source Nodes: [truediv, attn], Original ATen: [aten.div, aten.clone] triton_poi_fused_clone_div_2.run(buf4, buf6, buf19, 256, grid=grid(256), stream=stream0) buf20 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf10, buf12, buf20, 16, 16, grid=grid(16, 16), stream=stream0) buf21 = reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf20, (16, 4, 4), (16, 4, 1), 0), out=buf21) buf22 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf21, buf22, 256, grid=grid(256), stream=stream0) buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf25 = reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf21 # reuse # Topologically Sorted Source Nodes: [softmax, output], Original ATen: [aten._softmax, aten._to_copy] triton_poi_fused__softmax__to_copy_5.run(buf22, buf23, buf25, 256, grid=grid(256), stream=stream0) buf24 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf16, buf18, buf24, 256, grid=grid(256), stream=stream0) buf26 = reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf24, (16, 4, 4), (16, 4, 1), 0), out=buf26) buf27 = reinterpret_tensor(buf13, (16, 1), (1, 16), 0); del buf13 # reuse # Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_1.run(primals_11, buf27, 16, grid=grid(16), stream=stream0) del primals_11 buf28 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf16 # reuse # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf26, buf28, 256, grid=grid(256), stream=stream0) buf29 = empty_strided_cuda((16, 1), (1, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf28, (16, 16), (16, 1), 0), buf27, out=buf29) buf30 = empty_strided_cuda((1, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_10], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_t_8.run(primals_12, buf30, 4, grid=grid(4), stream=stream0) del primals_12 buf31 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [linear_10], Original ATen: [aten.mm] extern_kernels.mm(buf29, buf30, out=buf31) buf32 = empty_strided_cuda((16, 4), (1, 16), torch.float16) # Topologically Sorted Source Nodes: [linear_11], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_0.run(primals_13, buf32, 64, grid=grid(64), stream=stream0) del primals_13 buf33 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_11], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf28, (16, 16), (16, 1), 0), buf32, out=buf33) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_3, q_4], Original ATen: [aten.add] triton_poi_fused_add_9.run(buf31, buf33, primals_1, buf34, 64, grid=grid(64), stream=stream0) del primals_1 buf35 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf36 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_10.run(buf34, buf35, buf36, 16, grid=grid(16), stream=stream0) buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf39 = reinterpret_tensor(buf33, (4, 4, 4), (16, 4, 1), 0); del buf33 # reuse # Topologically Sorted Source Nodes: [q_5, linear_12], Original ATen: [aten.native_layer_norm, aten._to_copy] 
triton_poi_fused__to_copy_native_layer_norm_11.run(buf34, buf35, buf36, primals_14, primals_15, buf37, buf39, 64, grid=grid(64), stream=stream0) del primals_15 buf38 = empty_strided_cuda((4, 4), (1, 4), torch.float16) # Topologically Sorted Source Nodes: [linear_12], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_1.run(primals_17, buf38, 16, grid=grid(16), stream=stream0) del primals_17 buf40 = buf31; del buf31 # reuse # Topologically Sorted Source Nodes: [linear_12], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0), buf38, out=buf40) buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_13], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_18, buf41, 64, grid=grid(64), stream=stream0) del primals_18 buf42 = reinterpret_tensor(buf26, (16, 16), (16, 1), 0); del buf26 # reuse # Topologically Sorted Source Nodes: [linear_13], Original ATen: [aten.mm] extern_kernels.mm(buf40, reinterpret_tensor(buf41, (4, 16), (1, 4), 0), out=buf42) buf43 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_14], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_19, buf43, 64, grid=grid(64), stream=stream0) del primals_19 buf44 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [linear_14], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0), reinterpret_tensor(buf43, (4, 16), (1, 4), 0), out=buf44) buf45 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_15], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_16, buf45, 64, grid=grid(64), stream=stream0) del primals_16 buf46 = empty_strided_cuda((4, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_15], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_1.run(primals_20, buf46, 16, grid=grid(16), stream=stream0) del primals_20 buf47 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_15], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf46, (4, 4), (1, 4), 0), out=buf47) buf48 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_16], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_21, buf48, 64, grid=grid(64), stream=stream0) del primals_21 buf49 = empty_strided_cuda((16, 16), (16, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_16], Original ATen: [aten.mm] extern_kernels.mm(buf47, reinterpret_tensor(buf48, (4, 16), (1, 4), 0), out=buf49) buf50 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_17], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_22, buf50, 64, grid=grid(64), stream=stream0) del primals_22 buf51 = empty_strided_cuda((16, 16), (16, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_17], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf50, (4, 16), (1, 4), 0), out=buf51) buf52 = buf46; del buf46 # reuse # Topologically Sorted Source Nodes: [linear_18], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_1.run(primals_23, buf52, 16, grid=grid(16), stream=stream0) del primals_23 buf53 = buf50; del buf50 # reuse # Topologically Sorted Source Nodes: [linear_18], Original 
ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf52, (4, 4), (1, 4), 0), out=buf53) buf54 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_19], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_24, buf54, 64, grid=grid(64), stream=stream0) del primals_24 buf55 = empty_strided_cuda((16, 16), (16, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_19], Original ATen: [aten.mm] extern_kernels.mm(buf53, reinterpret_tensor(buf54, (4, 16), (1, 4), 0), out=buf55) buf56 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_20], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_0.run(primals_25, buf56, 64, grid=grid(64), stream=stream0) del primals_25 buf57 = empty_strided_cuda((16, 16), (16, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_20], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf56, (4, 16), (1, 4), 0), out=buf57) buf58 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) # Topologically Sorted Source Nodes: [truediv_1, attn_2], Original ATen: [aten.div, aten.clone] triton_poi_fused_clone_div_2.run(buf42, buf44, buf58, 256, grid=grid(256), stream=stream0) del buf42 buf59 = reinterpret_tensor(buf44, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf44 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf49, buf51, buf59, 16, 16, grid=grid(16, 16), stream=stream0) buf60 = reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0); del buf51 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf58, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0), out=buf60) buf61 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf60, buf61, 256, grid=grid(256), stream=stream0) buf62 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf64 = reinterpret_tensor(buf60, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf60 # reuse # Topologically Sorted Source Nodes: [softmax_1, output_1], Original ATen: [aten._softmax, aten._to_copy] triton_poi_fused__softmax__to_copy_5.run(buf61, buf62, buf64, 256, grid=grid(256), stream=stream0) del buf61 buf63 = reinterpret_tensor(buf49, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf49 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf55, buf57, buf63, 256, grid=grid(256), stream=stream0) buf65 = reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0); del buf57 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf64, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0), out=buf65) buf66 = reinterpret_tensor(buf52, (16, 1), (1, 16), 0); del buf52 # reuse # Topologically Sorted Source Nodes: [linear_21], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_1.run(primals_26, buf66, 16, grid=grid(16), stream=stream0) del primals_26 buf67 = reinterpret_tensor(buf55, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf55 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf65, buf67, 256, grid=grid(256), stream=stream0) del buf65 buf68 = 
empty_strided_cuda((16, 1), (1, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_21], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf67, (16, 16), (16, 1), 0), buf66, out=buf68) buf69 = empty_strided_cuda((1, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_22], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_t_8.run(primals_27, buf69, 4, grid=grid(4), stream=stream0) del primals_27 buf70 = buf56; del buf56 # reuse # Topologically Sorted Source Nodes: [linear_22], Original ATen: [aten.mm] extern_kernels.mm(buf68, buf69, out=buf70) buf71 = empty_strided_cuda((16, 4), (1, 16), torch.float16) # Topologically Sorted Source Nodes: [linear_23], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_0.run(primals_28, buf71, 64, grid=grid(64), stream=stream0) del primals_28 buf72 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_23], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf67, (16, 16), (16, 1), 0), buf71, out=buf72) buf73 = buf37; del buf37 # reuse # Topologically Sorted Source Nodes: [add_7, q_10], Original ATen: [aten.add] triton_poi_fused_add_12.run(buf73, buf70, buf72, 64, grid=grid(64), stream=stream0) buf74 = buf36; del buf36 # reuse buf75 = buf35; del buf35 # reuse # Topologically Sorted Source Nodes: [q_11], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_10.run(buf73, buf74, buf75, 16, grid=grid(16), stream=stream0) buf76 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf78 = reinterpret_tensor(buf72, (4, 4, 4), (16, 4, 1), 0); del buf72 # reuse # Topologically Sorted Source Nodes: [q_11, linear_24], Original ATen: [aten.native_layer_norm, aten._to_copy] triton_poi_fused__to_copy_native_layer_norm_11.run(buf73, buf74, buf75, primals_29, primals_30, buf76, buf78, 64, grid=grid(64), stream=stream0) del primals_30 buf77 = empty_strided_cuda((4, 1), (1, 4), torch.float16) # Topologically Sorted Source Nodes: [linear_24], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_t_8.run(primals_31, buf77, 4, grid=grid(4), stream=stream0) del primals_31 buf79 = empty_strided_cuda((16, 1), (1, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_24], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf78, (16, 4), (4, 1), 0), buf77, out=buf79) buf80 = empty_strided_cuda((1, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_25], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_t_8.run(primals_32, buf80, 4, grid=grid(4), stream=stream0) del primals_32 buf81 = buf70; del buf70 # reuse # Topologically Sorted Source Nodes: [linear_25], Original ATen: [aten.mm] extern_kernels.mm(buf79, buf80, out=buf81) buf82 = empty_strided_cuda((4, 4), (1, 4), torch.float16) # Topologically Sorted Source Nodes: [linear_26], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_1.run(primals_33, buf82, 16, grid=grid(16), stream=stream0) del primals_33 buf83 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf78, (16, 4), (4, 1), 0), buf82, out=buf83) buf84 = empty_strided_cuda((4, 1), (1, 4), torch.float16) # Topologically Sorted Source Nodes: [linear_27], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_t_8.run(primals_35, buf84, 4, grid=grid(4), stream=stream0) del primals_35 buf85 = reinterpret_tensor(buf81, (4, 4, 4), (16, 4, 
1), 0); del buf81 # reuse buf95 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [add_8, x], Original ATen: [aten.add, aten.relu, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_13.run(buf85, buf83, primals_34, buf95, 64, grid=grid(64), stream=stream0) del primals_34 buf86 = empty_strided_cuda((16, 1), (1, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_27], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf85, (16, 4), (4, 1), 0), buf84, out=buf86) buf87 = empty_strided_cuda((1, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [linear_28], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_t_8.run(primals_36, buf87, 4, grid=grid(4), stream=stream0) del primals_36 buf88 = buf83; del buf83 # reuse # Topologically Sorted Source Nodes: [linear_28], Original ATen: [aten.mm] extern_kernels.mm(buf86, buf87, out=buf88) buf89 = empty_strided_cuda((4, 4), (1, 4), torch.float16) # Topologically Sorted Source Nodes: [linear_29], Original ATen: [aten._to_copy, aten.t] triton_poi_fused__to_copy_1.run(primals_37, buf89, 16, grid=grid(16), stream=stream0) del primals_37 buf90 = empty_strided_cuda((16, 4), (4, 1), torch.float16) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf85, (16, 4), (4, 1), 0), buf89, out=buf90) buf91 = buf76; del buf76 # reuse # Topologically Sorted Source Nodes: [x_1, x_3], Original ATen: [aten.add] triton_poi_fused_add_14.run(buf91, buf88, buf90, primals_38, 64, grid=grid(64), stream=stream0) del buf88 del buf90 del primals_38 buf92 = buf75; del buf75 # reuse buf93 = buf74; del buf74 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_10.run(buf91, buf92, buf93, 16, grid=grid(16), stream=stream0) buf94 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_15.run(buf91, buf92, buf93, primals_39, primals_40, buf94, 64, grid=grid(64), stream=stream0) del buf92 del buf93 del primals_40 return (buf94, buf23, buf62, primals_14, primals_29, primals_39, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 16), (1, 4), 0), buf2, reinterpret_tensor(buf9, (4, 16), (1, 4), 0), buf8, reinterpret_tensor(buf15, (4, 16), (1, 4), 0), buf14, buf23, reinterpret_tensor(buf28, (16, 16), (16, 1), 0), buf29, buf34, reinterpret_tensor(buf39, (16, 4), (4, 1), 0), reinterpret_tensor(buf41, (4, 16), (1, 4), 0), buf40, reinterpret_tensor(buf43, (4, 16), (1, 4), 0), reinterpret_tensor(buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf48, (4, 16), (1, 4), 0), buf47, reinterpret_tensor(buf54, (4, 16), (1, 4), 0), buf53, buf62, reinterpret_tensor(buf67, (16, 16), (16, 1), 0), buf68, buf73, reinterpret_tensor(buf78, (16, 4), (4, 1), 0), buf79, reinterpret_tensor(buf85, (16, 4), (4, 1), 0), buf86, buf91, reinterpret_tensor(buf89, (4, 4), (4, 1), 0), reinterpret_tensor(buf87, (4, 1), (1, 1), 0), reinterpret_tensor(buf84, (1, 4), (4, 1), 0), buf95, reinterpret_tensor(buf82, (4, 4), (4, 1), 0), reinterpret_tensor(buf80, (4, 1), (1, 1), 0), reinterpret_tensor(buf77, (1, 4), (4, 1), 0), reinterpret_tensor(buf71, (4, 16), (16, 1), 0), reinterpret_tensor(buf69, (4, 1), (1, 1), 0), reinterpret_tensor(buf66, (1, 16), (16, 1), 0), reinterpret_tensor(buf64, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf63, (16, 4, 4), (16, 1, 4), 
0), reinterpret_tensor(buf58, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf59, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf38, (4, 4), (4, 1), 0), reinterpret_tensor(buf32, (4, 16), (16, 1), 0), reinterpret_tensor(buf30, (4, 1), (1, 1), 0), reinterpret_tensor(buf27, (1, 16), (16, 1), 0), reinterpret_tensor(buf25, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf24, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf20, (16, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((4, 1), (1, 1), device='cuda:0', 
dtype=torch.float32) primals_37 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
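Throughout the wrapper above, activations are staged in torch.float16 buffers (the _to_copy kernels) because every forward in the source module runs under @autocast(), so Inductor bakes the resulting casts into the graph. A standalone eager-mode illustration of that dtype behavior, not part of the generated file, using the same test shapes:

import torch

# Under CUDA autocast, matmul-backed ops such as nn.Linear execute in
# float16, which is why the generated buffers above are torch.float16.
lin = torch.nn.Linear(4, 16, bias=False).cuda()
x = torch.rand(4, 4, 4, device='cuda')
with torch.autocast('cuda'):
    y = lin(x)
print(y.dtype)  # torch.float16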
import torch
import torch.nn as nn
import torch.utils.checkpoint
import torch.nn.functional as F
from torch.cuda.amp import autocast


class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)

    @autocast()
    def forward(self, q, k, v, mask=None):
        attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
        if mask is not None:
            attn = attn.masked_fill(mask == 0, -2 ** 15)
        attn = self.dropout(F.softmax(attn, dim=-1))
        output = torch.matmul(attn, v)
        return output, attn


class LowRankResidualMultiHeadAttention(nn.Module):
    """ Multi-Head Attention module """

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        self.w_qs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False)
        self.w_qs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False)
        self.w_qs_res = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False)
        self.w_ks_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False)
        self.w_ks_res = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False)
        self.w_vs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False)
        self.w_vs_res = nn.Linear(d_model, n_head * d_v, bias=False)
        self.fc_u = nn.Linear(int(d_model / 4), d_model, bias=False)
        self.fc_v = nn.Linear(n_head * d_v, int(d_model / 4), bias=False)
        self.fc_res = nn.Linear(n_head * d_v, d_model, bias=False)
        self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)

    @autocast()
    def forward(self, q, k, v, mask=None):
        d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
        residual = q
        q = (self.w_qs_u(self.w_qs_v(q)) + self.w_qs_res(q)).view(sz_b, len_q, n_head, d_k)
        k = (self.w_ks_u(self.w_ks_v(k)) + self.w_ks_res(k)).view(sz_b, len_k, n_head, d_k)
        v = (self.w_vs_u(self.w_vs_v(v)) + self.w_vs_res(v)).view(sz_b, len_v, n_head, d_k)
        q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
        if mask is not None:
            mask = mask.unsqueeze(1)
        q, attn = self.attention(q, k, v, mask=mask)
        q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
        q = self.dropout(self.fc_u(self.fc_v(q)) + self.fc_res(q))
        q += residual
        q = self.layer_norm(q)
        return q, attn


class LowRankResidualPositionwiseFeedForward(nn.Module):
    """ A two-feed-forward-layer module """

    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        self.w_1_u = nn.Linear(int(d_in / 4), d_hid, bias=False)
        self.w_1_v = nn.Linear(d_in, int(d_in / 4), bias=False)
        self.w_1_res = nn.Linear(d_in, d_hid)
        self.w_2_u = nn.Linear(int(d_in / 4), d_in, bias=False)
        self.w_2_v = nn.Linear(d_hid, int(d_in / 4), bias=False)
        self.w_2_res = nn.Linear(d_hid, d_in)
        self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
        self.dropout = nn.Dropout(dropout)

    @autocast()
    def forward(self, x):
        residual = x
        x = F.relu(self.w_1_u(self.w_1_v(x)) + self.w_1_res(x))
        x = self.w_2_u(self.w_2_v(x)) + self.w_2_res(x)
        x = self.dropout(x)
        x += residual
        x = self.layer_norm(x)
        return x


class LowRankResidualDecoderLayer(nn.Module):
    """ Compose with three layers """

    def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
        super(LowRankResidualDecoderLayer, self).__init__()
        self.slf_attn = LowRankResidualMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
        self.enc_attn = LowRankResidualMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
        self.pos_ffn = LowRankResidualPositionwiseFeedForward(d_model, d_inner, dropout=dropout)

    @autocast()
    def forward(self, dec_input, enc_output, slf_attn_mask=None, dec_enc_attn_mask=None):
        dec_output, dec_slf_attn = self.slf_attn(dec_input, dec_input, dec_input, mask=slf_attn_mask)
        dec_output, dec_enc_attn = self.enc_attn(dec_output, enc_output, enc_output, mask=dec_enc_attn_mask)
        dec_output = self.pos_ffn(dec_output)
        return dec_output, dec_slf_attn, dec_enc_attn


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
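Every projection above follows the same low-rank-plus-residual pattern: a narrow factor pair u(v(x)) whose inner rank is a quarter of the layer width, summed with a full-rank residual path. A minimal self-contained sketch of that building block (the class name LowRankLinearSketch and the printed shapes are illustrative, not from the repo):

import torch
import torch.nn as nn


class LowRankLinearSketch(nn.Module):
    """y = U(V x) + R x: a narrow factor pair plus a full-rank residual path."""

    def __init__(self, d_in, d_out, rank):
        super().__init__()
        self.v = nn.Linear(d_in, rank, bias=False)     # down-projection V
        self.u = nn.Linear(rank, d_out, bias=False)    # up-projection U
        self.res = nn.Linear(d_in, d_out, bias=False)  # full-rank residual R

    def forward(self, x):
        return self.u(self.v(x)) + self.res(x)


# Mirrors the w_qs_{v,u,res} triple under get_init_inputs():
# d_model=4, n_head*d_k=16, rank = int(n_head*d_k/4) = 4.
proj = LowRankLinearSketch(d_in=4, d_out=16, rank=4)
print(proj(torch.rand(4, 4, 4)).shape)  # torch.Size([4, 4, 16])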
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F from torch.cuda.amp import autocast assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x4, xmask).to(tl.float32) tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp4, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask).to(tl.float32) tmp1 = tl.load(in_ptr1 + (x2 + 16 * y3), xmask & ymask).to(tl.float32) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .float32) tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.float32) tmp1 = tmp0.to(tl.float32) tmp3 = tmp2.to(tl.float32) tmp5 = tmp4.to(tl.float32) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7.to(tl.float32) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10.to(tl.float32) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def 
triton_poi_fused__softmax__to_copy_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x4, xmask).to(tl.float32) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp2, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl .float32) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_t_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x0, xmask).to(tl.float32) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tmp2.to(tl.float32) tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused__to_copy_native_layer_norm_11(in_ptr0, 
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tmp8.to(tl.float32) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x0, xmask).to(tl.float32) tmp4 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tmp2.to(tl.float32) tmp5 = tmp3 + tmp4 tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_13(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2.to(tl.float32) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = 0.0 tmp9 = tmp7 <= tmp8 tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.float32) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tmp2.to(tl.float32) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp6 = tmp5.to(tl.float32) tmp8 = tmp6 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_native_layer_norm_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, 
primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (1, 16), (16, 1)) assert_size_stride(primals_12, (4, 1), (1, 1)) assert_size_stride(primals_13, (4, 16), (16, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (16, 4), (4, 1)) assert_size_stride(primals_19, (16, 4), (4, 1)) assert_size_stride(primals_20, (4, 4), (4, 1)) assert_size_stride(primals_21, (16, 4), (4, 1)) assert_size_stride(primals_22, (16, 4), (4, 1)) assert_size_stride(primals_23, (4, 4), (4, 1)) assert_size_stride(primals_24, (16, 4), (4, 1)) assert_size_stride(primals_25, (16, 4), (4, 1)) assert_size_stride(primals_26, (1, 16), (16, 1)) assert_size_stride(primals_27, (4, 1), (1, 1)) assert_size_stride(primals_28, (4, 16), (16, 1)) assert_size_stride(primals_29, (4,), (1,)) assert_size_stride(primals_30, (4,), (1,)) assert_size_stride(primals_31, (1, 4), (4, 1)) assert_size_stride(primals_32, (4, 1), (1, 1)) assert_size_stride(primals_33, (4, 4), (4, 1)) assert_size_stride(primals_34, (4,), (1,)) assert_size_stride(primals_35, (1, 4), (4, 1)) assert_size_stride(primals_36, (4, 1), (1, 1)) assert_size_stride(primals_37, (4, 4), (4, 1)) assert_size_stride(primals_38, (4,), (1,)) assert_size_stride(primals_39, (4,), (1,)) assert_size_stride(primals_40, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(64)](primals_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_2, buf1, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_3, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf2, reinterpret_tensor(buf3, (4, 16), (1, 4), 0 ), out=buf4) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_4, buf5, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_4 buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 16), (1, 4), 0), out=buf6) buf7 = buf1 del buf1 triton_poi_fused__to_copy_1[grid(16)](primals_5, buf7, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_5 buf8 = buf5 del buf5 extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf7, (4, 
4), (1, 4), 0), out=buf8) buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_6, buf9, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_6 buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf8, reinterpret_tensor(buf9, (4, 16), (1, 4), 0 ), out=buf10) buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_7, buf11, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_7 buf12 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf11, (4, 16), (1, 4), 0), out=buf12) buf13 = buf7 del buf7 triton_poi_fused__to_copy_1[grid(16)](primals_8, buf13, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_8 buf14 = buf11 del buf11 extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf13, (4, 4), (1, 4), 0), out=buf14) buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_9, buf15, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_9 buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf14, reinterpret_tensor(buf15, (4, 16), (1, 4), 0), out=buf16) buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_10, buf17, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_10 buf18 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf17, (4, 16), (1, 4), 0), out=buf18) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_2[grid(256)](buf4, buf6, buf19, 256, XBLOCK=256, num_warps=4, num_stages=1) buf20 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_clone_3[grid(16, 16)](buf10, buf12, buf20, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf21 = reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0) del buf12 extern_kernels.bmm(reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf20, (16, 4, 4), (16, 4, 1), 0), out=buf21 ) buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf21, buf22, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf25 = reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf21 triton_poi_fused__softmax__to_copy_5[grid(256)](buf22, buf23, buf25, 256, XBLOCK=256, num_warps=4, num_stages=1) buf24 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf10 triton_poi_fused_clone_6[grid(256)](buf16, buf18, buf24, 256, XBLOCK=256, num_warps=4, num_stages=1) buf26 = reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1), 0) del buf18 extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf24, (16, 4, 4), (16, 4, 1), 0), out=buf26 ) buf27 = reinterpret_tensor(buf13, (16, 1), (1, 16), 0) del buf13 triton_poi_fused__to_copy_1[grid(16)](primals_11, buf27, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_11 buf28 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf16 triton_poi_fused_clone_7[grid(256)](buf26, buf28, 256, XBLOCK=256, num_warps=4, num_stages=1) buf29 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf28, (16, 16), (16, 1), 0), buf27, out=buf29) buf30 = 
empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_12, buf30, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_12 buf31 = buf17 del buf17 extern_kernels.mm(buf29, buf30, out=buf31) buf32 = empty_strided_cuda((16, 4), (1, 16), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_13, buf32, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_13 buf33 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf28, (16, 16), (16, 1), 0), buf32, out=buf33) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_9[grid(64)](buf31, buf33, primals_1, buf34, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf35 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf36 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_10[grid(16)](buf34, buf35, buf36, 16, XBLOCK=16, num_warps=1, num_stages=1) buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf39 = reinterpret_tensor(buf33, (4, 4, 4), (16, 4, 1), 0) del buf33 triton_poi_fused__to_copy_native_layer_norm_11[grid(64)](buf34, buf35, buf36, primals_14, primals_15, buf37, buf39, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_15 buf38 = empty_strided_cuda((4, 4), (1, 4), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_17, buf38, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_17 buf40 = buf31 del buf31 extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0), buf38, out=buf40) buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_18, buf41, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_18 buf42 = reinterpret_tensor(buf26, (16, 16), (16, 1), 0) del buf26 extern_kernels.mm(buf40, reinterpret_tensor(buf41, (4, 16), (1, 4), 0), out=buf42) buf43 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_19, buf43, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_19 buf44 = buf4 del buf4 extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0), reinterpret_tensor(buf43, (4, 16), (1, 4), 0), out=buf44) buf45 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_16, buf45, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_16 buf46 = empty_strided_cuda((4, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_20, buf46, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_20 buf47 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf46, (4, 4), (1, 4), 0), out=buf47) buf48 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_21, buf48, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_21 buf49 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf47, reinterpret_tensor(buf48, (4, 16), (1, 4), 0), out=buf49) buf50 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_22, buf50, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_22 buf51 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf50, (4, 16), (1, 4), 0), out=buf51) buf52 = buf46 del buf46 triton_poi_fused__to_copy_1[grid(16)](primals_23, buf52, 16, XBLOCK =16, num_warps=1, num_stages=1) del 
primals_23 buf53 = buf50 del buf50 extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf52, (4, 4), (1, 4), 0), out=buf53) buf54 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_24, buf54, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_24 buf55 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(buf53, reinterpret_tensor(buf54, (4, 16), (1, 4), 0), out=buf55) buf56 = empty_strided_cuda((16, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_25, buf56, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_25 buf57 = empty_strided_cuda((16, 16), (16, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf56, (4, 16), (1, 4), 0), out=buf57) buf58 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16) triton_poi_fused_clone_div_2[grid(256)](buf42, buf44, buf58, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf42 buf59 = reinterpret_tensor(buf44, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf44 triton_poi_fused_clone_3[grid(16, 16)](buf49, buf51, buf59, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf60 = reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0) del buf51 extern_kernels.bmm(reinterpret_tensor(buf58, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0), out=buf60 ) buf61 = buf22 del buf22 triton_poi_fused__softmax_4[grid(256)](buf60, buf61, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf62 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf64 = reinterpret_tensor(buf60, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf60 triton_poi_fused__softmax__to_copy_5[grid(256)](buf61, buf62, buf64, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf61 buf63 = reinterpret_tensor(buf49, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf49 triton_poi_fused_clone_6[grid(256)](buf55, buf57, buf63, 256, XBLOCK=256, num_warps=4, num_stages=1) buf65 = reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0) del buf57 extern_kernels.bmm(reinterpret_tensor(buf64, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0), out=buf65 ) buf66 = reinterpret_tensor(buf52, (16, 1), (1, 16), 0) del buf52 triton_poi_fused__to_copy_1[grid(16)](primals_26, buf66, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_26 buf67 = reinterpret_tensor(buf55, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf55 triton_poi_fused_clone_7[grid(256)](buf65, buf67, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf65 buf68 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf67, (16, 16), (16, 1), 0), buf66, out=buf68) buf69 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_27, buf69, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_27 buf70 = buf56 del buf56 extern_kernels.mm(buf68, buf69, out=buf70) buf71 = empty_strided_cuda((16, 4), (1, 16), torch.float16) triton_poi_fused__to_copy_0[grid(64)](primals_28, buf71, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_28 buf72 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf67, (16, 16), (16, 1), 0), buf71, out=buf72) buf73 = buf37 del buf37 triton_poi_fused_add_12[grid(64)](buf73, buf70, buf72, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf74 = buf36 del buf36 buf75 = buf35 del buf35 triton_poi_fused_native_layer_norm_10[grid(16)](buf73, buf74, buf75, 16, XBLOCK=16, num_warps=1, num_stages=1) 
buf76 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf78 = reinterpret_tensor(buf72, (4, 4, 4), (16, 4, 1), 0) del buf72 triton_poi_fused__to_copy_native_layer_norm_11[grid(64)](buf73, buf74, buf75, primals_29, primals_30, buf76, buf78, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_30 buf77 = empty_strided_cuda((4, 1), (1, 4), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_31, buf77, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_31 buf79 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf78, (16, 4), (4, 1), 0), buf77, out=buf79) buf80 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_32, buf80, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_32 buf81 = buf70 del buf70 extern_kernels.mm(buf79, buf80, out=buf81) buf82 = empty_strided_cuda((4, 4), (1, 4), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_33, buf82, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_33 buf83 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf78, (16, 4), (4, 1), 0), buf82, out=buf83) buf84 = empty_strided_cuda((4, 1), (1, 4), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_35, buf84, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_35 buf85 = reinterpret_tensor(buf81, (4, 4, 4), (16, 4, 1), 0) del buf81 buf95 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_13[grid(64)](buf85, buf83, primals_34, buf95, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_34 buf86 = empty_strided_cuda((16, 1), (1, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf85, (16, 4), (4, 1), 0), buf84, out=buf86) buf87 = empty_strided_cuda((1, 4), (4, 1), torch.float16) triton_poi_fused__to_copy_t_8[grid(4)](primals_36, buf87, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_36 buf88 = buf83 del buf83 extern_kernels.mm(buf86, buf87, out=buf88) buf89 = empty_strided_cuda((4, 4), (1, 4), torch.float16) triton_poi_fused__to_copy_1[grid(16)](primals_37, buf89, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_37 buf90 = empty_strided_cuda((16, 4), (4, 1), torch.float16) extern_kernels.mm(reinterpret_tensor(buf85, (16, 4), (4, 1), 0), buf89, out=buf90) buf91 = buf76 del buf76 triton_poi_fused_add_14[grid(64)](buf91, buf88, buf90, primals_38, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf88 del buf90 del primals_38 buf92 = buf75 del buf75 buf93 = buf74 del buf74 triton_poi_fused_native_layer_norm_10[grid(16)](buf91, buf92, buf93, 16, XBLOCK=16, num_warps=1, num_stages=1) buf94 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_15[grid(64)](buf91, buf92, buf93, primals_39, primals_40, buf94, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf92 del buf93 del primals_40 return (buf94, buf23, buf62, primals_14, primals_29, primals_39, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor( buf3, (4, 16), (1, 4), 0), buf2, reinterpret_tensor(buf9, (4, 16), (1, 4), 0), buf8, reinterpret_tensor(buf15, (4, 16), (1, 4), 0), buf14, buf23, reinterpret_tensor(buf28, (16, 16), (16, 1), 0), buf29, buf34, reinterpret_tensor(buf39, (16, 4), (4, 1), 0), reinterpret_tensor(buf41, (4, 16), (1, 4), 0), buf40, reinterpret_tensor(buf43, (4, 16), (1, 4), 0), reinterpret_tensor( buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf48, (4, 16), (1, 4), 0), buf47, reinterpret_tensor(buf54, (4, 16), (1, 4), 0), buf53, buf62, 
reinterpret_tensor(buf67, (16, 16), (16, 1), 0), buf68, buf73, reinterpret_tensor(buf78, (16, 4), (4, 1), 0), buf79, reinterpret_tensor(buf85, (16, 4), (4, 1), 0), buf86, buf91, reinterpret_tensor(buf89, (4, 4), (4, 1), 0), reinterpret_tensor( buf87, (4, 1), (1, 1), 0), reinterpret_tensor(buf84, (1, 4), (4, 1), 0), buf95, reinterpret_tensor(buf82, (4, 4), (4, 1), 0), reinterpret_tensor(buf80, (4, 1), (1, 1), 0), reinterpret_tensor( buf77, (1, 4), (4, 1), 0), reinterpret_tensor(buf71, (4, 16), (16, 1), 0), reinterpret_tensor(buf69, (4, 1), (1, 1), 0), reinterpret_tensor(buf66, (1, 16), (16, 1), 0), reinterpret_tensor( buf64, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf63, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf58, (16, 4, 4), (16, 1, 4 ), 0), reinterpret_tensor(buf59, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf38, (4, 4), (4, 1), 0), reinterpret_tensor( buf32, (4, 16), (16, 1), 0), reinterpret_tensor(buf30, (4, 1), (1, 1), 0), reinterpret_tensor(buf27, (1, 16), (16, 1), 0), reinterpret_tensor(buf25, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf24, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf20, (16, 4, 4), (16, 1, 4), 0)) class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) @autocast() def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -2 ** 15) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class LowRankResidualMultiHeadAttention(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_qs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_qs_res = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_ks_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_ks_res = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False ) self.w_vs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False) self.w_vs_res = nn.Linear(d_model, n_head * d_v, bias=False) self.fc_u = nn.Linear(int(d_model / 4), d_model, bias=False) self.fc_v = nn.Linear(n_head * d_v, int(d_model / 4), bias=False) self.fc_res = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) @autocast() def forward(self, q, k, v, mask=None): d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = (self.w_qs_u(self.w_qs_v(q)) + self.w_qs_res(q)).view(sz_b, len_q, n_head, d_k) k = (self.w_ks_u(self.w_ks_v(k)) + self.w_ks_res(k)).view(sz_b, len_k, n_head, d_k) v = (self.w_vs_u(self.w_vs_v(v)) + self.w_vs_res(v)).view(sz_b, len_v, n_head, d_k) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = 
self.dropout(self.fc_u(self.fc_v(q)) + self.fc_res(q)) q += residual q = self.layer_norm(q) return q, attn class LowRankResidualPositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1_u = nn.Linear(int(d_in / 4), d_hid, bias=False) self.w_1_v = nn.Linear(d_in, int(d_in / 4), bias=False) self.w_1_res = nn.Linear(d_in, d_hid) self.w_2_u = nn.Linear(int(d_in / 4), d_in, bias=False) self.w_2_v = nn.Linear(d_hid, int(d_in / 4), bias=False) self.w_2_res = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) @autocast() def forward(self, x): residual = x x = F.relu(self.w_1_u(self.w_1_v(x)) + self.w_1_res(x)) x = self.w_2_u(self.w_2_v(x)) + self.w_2_res(x) x = self.dropout(x) x += residual x = self.layer_norm(x) return x class LowRankResidualDecoderLayerNew(nn.Module): """ Compose with three layers """ def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(LowRankResidualDecoderLayerNew, self).__init__() self.slf_attn = LowRankResidualMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.enc_attn = LowRankResidualMultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = LowRankResidualPositionwiseFeedForward(d_model, d_inner, dropout=dropout) def forward(self, input_0, input_1): primals_3 = self.slf_attn.w_qs_u.weight primals_2 = self.slf_attn.w_qs_v.weight primals_4 = self.slf_attn.w_qs_res.weight primals_6 = self.slf_attn.w_ks_u.weight primals_5 = self.slf_attn.w_ks_v.weight primals_7 = self.slf_attn.w_ks_res.weight primals_9 = self.slf_attn.w_vs_u.weight primals_8 = self.slf_attn.w_vs_v.weight primals_10 = self.slf_attn.w_vs_res.weight primals_12 = self.slf_attn.fc_u.weight primals_11 = self.slf_attn.fc_v.weight primals_13 = self.slf_attn.fc_res.weight primals_14 = self.slf_attn.layer_norm.weight primals_15 = self.slf_attn.layer_norm.bias primals_18 = self.enc_attn.w_qs_u.weight primals_17 = self.enc_attn.w_qs_v.weight primals_19 = self.enc_attn.w_qs_res.weight primals_21 = self.enc_attn.w_ks_u.weight primals_20 = self.enc_attn.w_ks_v.weight primals_22 = self.enc_attn.w_ks_res.weight primals_24 = self.enc_attn.w_vs_u.weight primals_23 = self.enc_attn.w_vs_v.weight primals_25 = self.enc_attn.w_vs_res.weight primals_27 = self.enc_attn.fc_u.weight primals_26 = self.enc_attn.fc_v.weight primals_28 = self.enc_attn.fc_res.weight primals_29 = self.enc_attn.layer_norm.weight primals_30 = self.enc_attn.layer_norm.bias primals_32 = self.pos_ffn.w_1_u.weight primals_31 = self.pos_ffn.w_1_v.weight primals_33 = self.pos_ffn.w_1_res.weight primals_34 = self.pos_ffn.w_1_res.bias primals_36 = self.pos_ffn.w_2_u.weight primals_35 = self.pos_ffn.w_2_v.weight primals_37 = self.pos_ffn.w_2_res.weight primals_38 = self.pos_ffn.w_2_res.bias primals_39 = self.pos_ffn.layer_norm.weight primals_40 = self.pos_ffn.layer_norm.bias primals_1 = input_0 primals_16 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40]) return output[0], output[1], output[2]
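LowRankResidualDecoderLayerNew keeps the original parameter layout but routes forward through the generated call, passing each nn.Linear and LayerNorm tensor as one of the forty primals and returning the decoder output together with both attention maps. A minimal sketch of exercising it, run in the same module as the generated code above (a CUDA device is required since the kernels are compiled for one; variable names are illustrative):

import torch

# Config mirrors get_init_inputs(); inputs mirror get_inputs().
layer = LowRankResidualDecoderLayerNew(d_model=4, d_inner=4, n_head=4,
                                       d_k=4, d_v=4).cuda()
dec_in = torch.rand(4, 4, 4, device='cuda')
enc_out = torch.rand(4, 4, 4, device='cuda')
out, slf_attn, enc_attn = layer(dec_in, enc_out)
print(out.shape, slf_attn.shape, enc_attn.shape)
# Expected under these shapes: (4, 4, 4), (4, 4, 4, 4), (4, 4, 4, 4)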
bahducoup/factorized_training
LowRankResidualDecoderLayer
false
12288
[ "MIT" ]
0
0af38f16338a9bcfcc11091b1a6b75befd67f234
https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
SelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_1 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_4,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, 
xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/eh/ceheq5ns3kg3p6tebb47gdy475c5v4keklf245jl4fgbpvugznm5.py # Topologically Sorted Source Nodes: [dot_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # dot_2 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [dot_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # dot_2 => div_1, 
sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (4, 16), (16, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # 
Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf1, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf0, buf4, 256, grid=grid(256), stream=stream0) buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [dot], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [dot_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, buf8, 256, grid=grid(256), stream=stream0) buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf9, buf10, 256, grid=grid(256), stream=stream0) del buf9 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf11) del primals_6 return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) 
if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn class SelfAttention(nn.Module): def __init__(self, embedding_dimension, num_heads): super().__init__() assert embedding_dimension % num_heads == 0, f'embedding dimension must be divisible by number of heads, got embedding_dimension={embedding_dimension!r}, num_heads={num_heads!r}' self.num_heads = num_heads k = embedding_dimension self.to_keys = nn.Linear(k, k * num_heads, bias=False) self.to_queries = nn.Linear(k, k * num_heads, bias=False) self.to_values = nn.Linear(k, k * num_heads, bias=False) self.unify_heads = nn.Linear(num_heads * k, k) def forward(self, x): b, t, k = x.size() h = self.num_heads keys = self.to_keys(x).view(b, t, h, k) queries = self.to_queries(x).view(b, t, h, k) values = self.to_values(x).view(b, t, h, k) keys = keys.transpose(1, 2).contiguous().view(b * h, t, k) queries = queries.transpose(1, 2).contiguous().view(b * h, t, k) values = values.transpose(1, 2).contiguous().view(b * h, t, k) dot = torch.bmm(queries, keys.transpose(1, 2)) dot = dot / k ** (1 / 2) dot = F.softmax(dot, dim=2) out = torch.bmm(dot, values).view(b, h, t, k) out = out.transpose(1, 2).contiguous().view(b, t, h * k) return self.unify_heads(out) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embedding_dimension': 4, 'num_heads': 4}]
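# A quick sketch (illustrative only, not part of the generated file) of the
# identity the fused softmax kernels below exploit: inductor folds the
# reference scaling `dot / k ** (1 / 2)` (k = 4, hence the literal 0.5 inside
# triton_poi_fused__softmax_1) into the numerically stable softmax. Because
# softmax is invariant to subtracting the row max, normalizing
# exp(0.5 * (x - max(x))) row-wise equals softmax(x / sqrt(4)):
import torch

x = torch.rand(16, 4, 4)
ref = torch.softmax(x / 4 ** 0.5, dim=2)
stable = torch.exp(0.5 * (x - x.max(dim=2, keepdim=True).values))
torch.testing.assert_close(ref, stable / stable.sum(dim=2, keepdim=True))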
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (4, 16), (16, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, 
(16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_clone_0[grid(256)](buf0, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_clone_0[grid(256)](buf2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(256)](buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf9 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf11) del primals_6 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0 ), primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) class SelfAttentionNew(nn.Module): def __init__(self, embedding_dimension, num_heads): super().__init__() assert embedding_dimension % num_heads == 0, f'embedding dimension must be divisible by number of heads, got embedding_dimension={embedding_dimension!r}, num_heads={num_heads!r}' self.num_heads = num_heads k = embedding_dimension self.to_keys = nn.Linear(k, k * num_heads, bias=False) self.to_queries = nn.Linear(k, k * num_heads, bias=False) self.to_values = nn.Linear(k, k * num_heads, bias=False) self.unify_heads = nn.Linear(num_heads * k, k) def forward(self, input_0): primals_2 = self.to_keys.weight primals_3 = self.to_queries.weight primals_4 = self.to_values.weight primals_5 = self.unify_heads.weight primals_6 = self.unify_heads.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
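# A minimal sanity check for this record (illustrative sketch, not part of
# the generated file): it assumes the reference SelfAttention above and the
# compiled SelfAttentionNew are importable in one session and that a CUDA
# device is available, since call() only has a CUDA path. The two modules
# should agree once they share weights.
import torch

torch.manual_seed(0)
ref = SelfAttention(embedding_dimension=4, num_heads=4).cuda()
new = SelfAttentionNew(embedding_dimension=4, num_heads=4).cuda()
new.load_state_dict(ref.state_dict())  # identical parameter names, so this syncs weights
x = torch.rand(4, 4, 4, device='cuda')
torch.testing.assert_close(ref(x), new(x), rtol=1e-4, atol=1e-4)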
dimitrios-ebi/gene_symbol_classifier
SelfAttention
false
12289
[ "Apache-2.0" ]
0
fe415f719fda4619041b9fe0639996c92e0f12a8
https://github.com/dimitrios-ebi/gene_symbol_classifier/tree/fe415f719fda4619041b9fe0639996c92e0f12a8
MHAttentionMap
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/gq/cgqprvbahmjs767ftakyktucj3aphyjb6jhxbuw5jm4fj5rx2u7i.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_9/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution] # Source node to ATen node mapping: # k => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_6, %unsqueeze_1, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/g2/cg266bzrczdgrg4t2bizcm563ududawebjvz37s7yg2panbw22xg.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_8, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_8, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 # Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_6, reinterpret_tensor(primals_4, (4, 4, 1, 1), (4, 1, 1, 1), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf2, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 
256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [weights], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), out=buf4) buf7 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_2.run(buf4, buf7, 16, 16, grid=grid(16), stream=stream0) del buf4 return (reinterpret_tensor(buf7, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0), primals_6, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf7, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 4), (16, 4, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.nn import functional as F import torch._utils class MHAttentionMap(nn.Module): """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" def __init__(self, query_dim, hidden_dim, num_heads=1, dropout=0.0, bias=True): super().__init__() self.num_heads = num_heads self.hidden_dim = hidden_dim self.dropout = nn.Dropout(dropout) self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) nn.init.zeros_(self.k_linear.bias) nn.init.zeros_(self.q_linear.bias) nn.init.xavier_uniform_(self.k_linear.weight) nn.init.xavier_uniform_(self.q_linear.weight) self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 def forward(self, q, k): q = self.q_linear(q) k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self. num_heads, k.shape[-2], k.shape[-1]) weights = torch.einsum('bqnc,bnchw->bqnhw', qh * self. normalize_fact, kh) weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights) weights = self.dropout(weights) return weights def get_inputs(): return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'query_dim': 4, 'hidden_dim': 4}]
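# The compiled graph for this module lowers the einsum 'bqnc,bnchw->bqnhw'
# to a single batched matmul over reshaped tensors (the bmm on buf2/buf3
# below). A sketch of that equivalence for the shapes used here; all names
# in this snippet are local to the sketch, not from the generated file.
import torch

b, q, n, c, h, w = 4, 4, 1, 4, 4, 4
qh = torch.rand(b, q, n, c)
kh = torch.rand(b, n, c, h, w)
ref = torch.einsum('bqnc,bnchw->bqnhw', qh, kh)
alt = torch.bmm(
    qh.permute(0, 2, 1, 3).reshape(b * n, q, c),  # group heads with batch
    kh.reshape(b * n, c, h * w),                  # flatten spatial dims
).view(b, n, q, h, w).permute(0, 2, 1, 3, 4)
torch.testing.assert_close(ref, alt)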
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = extern_kernels.convolution(primals_6, reinterpret_tensor( primals_4, (4, 4, 1, 1), (4, 1, 1, 1), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](buf2, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf3 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) 
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), out=buf4) buf7 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_per_fused__softmax_2[grid(16)](buf4, buf7, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf4 return reinterpret_tensor(buf7, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0 ), primals_6, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_4, (4, 4, 1, 1), (4, 1, 1, 1), 0 ), buf7, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0) class MHAttentionMapNew(nn.Module): """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" def __init__(self, query_dim, hidden_dim, num_heads=1, dropout=0.0, bias=True): super().__init__() self.num_heads = num_heads self.hidden_dim = hidden_dim self.dropout = nn.Dropout(dropout) self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) nn.init.zeros_(self.k_linear.bias) nn.init.zeros_(self.q_linear.bias) nn.init.xavier_uniform_(self.k_linear.weight) nn.init.xavier_uniform_(self.q_linear.weight) self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 def forward(self, input_0, input_1): primals_1 = self.q_linear.weight primals_2 = self.q_linear.bias primals_4 = self.k_linear.weight primals_5 = self.k_linear.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
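# Illustrative sanity check (assumes both classes above are in scope and a
# CUDA device is available): with the default dropout=0.0 the attention map
# is deterministic, so the compiled module should reproduce the reference
# once the q/k projection weights are shared.
import torch

torch.manual_seed(0)
ref = MHAttentionMap(query_dim=4, hidden_dim=4).cuda()
new = MHAttentionMapNew(query_dim=4, hidden_dim=4).cuda()
new.load_state_dict(ref.state_dict())
q = torch.rand(4, 4, 1, 4, device='cuda')
k = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(ref(q, k), new(q, k), rtol=1e-4, atol=1e-4)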
dingmyu/mmclassification
MHAttentionMap
false
12290
[ "Apache-2.0" ]
0
c600b22907fb9423899f7c308c659168c2d01cd8
https://github.com/dingmyu/mmclassification/tree/c600b22907fb9423899f7c308c659168c2d01cd8
GNNExplainerProbe
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/tx/ctx2zj7hbn4sliig4pcxh5fkp24urbsykncakmb56opn3qyr6bsf.py # Topologically Sorted Source Nodes: [s, neg, log, mul, sub, log_1, mul_1, mask_ent, mean, sum_1, mul_2, penalty], Original ATen: [aten.sigmoid, aten.neg, aten.log, aten.mul, aten.rsub, aten.sub, aten.mean, aten.sum, aten.add] # Source node to ATen node mapping: # log => log # log_1 => log_1 # mask_ent => sub_2 # mean => mean # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # neg => neg # penalty => add # s => sigmoid # sub => sub # sum_1 => sum_1 # Graph fragment: # %sigmoid : [num_users=5] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sigmoid,), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %log), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %log_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sigmoid,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mul_2), kwargs = {}) triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0 = async_compile.triton('triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.sigmoid(tmp0) tmp2 = -tmp1 tmp3 = tl_math.log(tmp1) tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tl_math.log(tmp6) tmp8 = tmp6 * tmp7 tmp9 = tmp4 - tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp16 = 4.0 tmp17 = tmp12 / tmp16 tmp18 = 0.5 tmp19 = tmp15 * tmp18 tmp20 = tmp17 + tmp19 tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp1, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [s, neg, log, mul, sub, log_1, mul_1, mask_ent, mean, sum_1, mul_2, penalty], Original ATen: [aten.sigmoid, aten.neg, aten.log, aten.mul, aten.rsub, aten.sub, aten.mean, aten.sum, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0.run(buf3, primals_1, buf0, 1, 4, grid=grid(1), stream=stream0) del primals_1 return (buf0, buf3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch


class AbstractTorchModule(torch.nn.Module):

    def __init__(self):
        torch.nn.Module.__init__(self)

    def save(self, path):
        # Persist the module's parameters to disk.
        torch.save(self.state_dict(), path)

    def load(self, path):
        # Restore parameters onto the module's current device.
        self.load_state_dict(torch.load(path, map_location=self.device))

    def set_device(self, device):
        self.device = device


class GNNExplainerProbe(AbstractTorchModule):

    def __init__(self, num_edges, num_layers, init_strategy='normal',
        const_val=1.0):
        super(GNNExplainerProbe, self).__init__()
        mask = torch.empty((num_layers, num_edges))
        if init_strategy == 'normal':
            std = torch.nn.init.calculate_gain('relu') * math.sqrt(2.0 / (
                2 * math.sqrt(num_edges)))
            with torch.no_grad():
                mask.normal_(1.0, std)
        elif init_strategy == 'const':
            torch.nn.init.constant_(mask, const_val)
        self.mask = torch.nn.Parameter(mask)

    def forward(self):
        s = torch.sigmoid(self.mask)
        mask_ent = -s * torch.log(s) - (1 - s) * torch.log(1 - s)
        penalty = mask_ent.mean() + 0.5 * s.sum()
        return s, penalty


def get_inputs():
    return []


def get_init_inputs():
    return [[], {'num_edges': 4, 'num_layers': 1}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    # The next three bare expressions are index/mask computations whose
    # results became unused after simplification (xnumel == 1, constant-True
    # masks); they are retained verbatim from the generated code.
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = -tmp1
    tmp3 = tl_math.log(tmp1)
    tmp4 = tmp2 * tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp1
    tmp7 = tl_math.log(tmp6)
    tmp8 = tmp6 * tmp7
    tmp9 = tmp4 - tmp8
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.sum(tmp10, 1)[:, None]
    tmp13 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp15 = tl.sum(tmp13, 1)[:, None]
    tmp16 = 4.0
    tmp17 = tmp12 / tmp16
    tmp18 = 0.5
    tmp19 = tmp15 * tmp18
    tmp20 = tmp17 + tmp19
    tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp1, None)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)


def call(args):
    primals_1, = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0[grid(1)](
            buf3, primals_1, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del primals_1
    return buf0, buf3, buf0


class AbstractTorchModule(torch.nn.Module):

    def __init__(self):
        torch.nn.Module.__init__(self)

    def save(self, path):
        # Persist the module's parameters to disk.
        torch.save(self.state_dict(), path)

    def load(self, path):
        # Restore parameters onto the module's current device.
        self.load_state_dict(torch.load(path, map_location=self.device))

    def set_device(self, device):
        self.device = device


class GNNExplainerProbeNew(AbstractTorchModule):

    def __init__(self, num_edges, num_layers, init_strategy='normal',
        const_val=1.0):
        super(GNNExplainerProbeNew, self).__init__()
        mask = torch.empty((num_layers, num_edges))
        if init_strategy == 'normal':
            std = torch.nn.init.calculate_gain('relu') * math.sqrt(2.0 / (
                2 * math.sqrt(num_edges)))
            with torch.no_grad():
                mask.normal_(1.0, std)
        elif init_strategy == 'const':
            torch.nn.init.constant_(mask, const_val)
        self.mask = torch.nn.Parameter(mask)

    def forward(self):
        primals_1 = self.mask
        output = call([primals_1])
        return output[0], output[1]
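# Illustrative check (assumes CUDA and both probe classes above in scope):
# the probe takes no inputs, so after sharing the mask parameter the single
# fused kernel should reproduce the eager (s, penalty) pair.
import torch

torch.manual_seed(0)
ref = GNNExplainerProbe(num_edges=4, num_layers=1).cuda()
new = GNNExplainerProbeNew(num_edges=4, num_layers=1).cuda()
new.load_state_dict(ref.state_dict())
s_ref, penalty_ref = ref()
s_new, penalty_new = new()
torch.testing.assert_close(s_ref, s_new, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(penalty_ref, penalty_new, rtol=1e-4, atol=1e-4)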
djz233/GraphMask
GNNExplainerProbe
false
12291
[ "MIT" ]
0
4b699a1685f0d26973bb90cd75b09d74726cdc2f
https://github.com/djz233/GraphMask/tree/4b699a1685f0d26973bb90cd75b09d74726cdc2f
DenseGCNConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ia/ciadpx4ik7ocmsqscox73ya2qxek7ekkw25w6rijleysqp7lfliw.py # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # setitem => full_default, index_put # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put.default](args = (%primals_2, [None, %iota, %iota], %full_default), kwargs = {}) triton_poi_fused_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3a/c3ajks6gn2yuzdnbqm5gdhl6nslpowgffuevr4gjguhn4q6d6v4b.py # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # setitem => full_default, index_put # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put.default](args = (%primals_2, [None, %iota, %iota], %full_default), kwargs = {}) triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) tmp0 = 1.0 tl.store(out_ptr0 + ((5*x0) + (16*x1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/by/cbyfwvqabdnuhk62vp6oux4o4uoccm6mz5rjytdtikttvdu3mbex.py # Topologically Sorted Source Nodes: [mul, adj_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # adj_1 => mul_1 # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %index_put), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze_1), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import 
AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x4 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (x4), xmask) tmp13 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = -0.5 tmp10 = libdevice.pow(tmp8, tmp9) tmp12 = tmp10 * tmp11 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tmp19 = tmp17 + tmp18 tmp20 = triton_helpers.maximum(tmp19, tmp7) tmp21 = libdevice.pow(tmp20, tmp9) tmp22 = tmp12 * tmp21 tl.store(out_ptr0 + (x4), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/kl/ckllf6mazqhprwh2tau7vxrdurc2kivba4tykq3gqxvebmdet63s.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # out_1 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5a/c5ainvzw5uinosmtjpfsqml2wdleittg5twoainmhbtwdqayr4sw.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] # Source node to ATen node mapping: # out_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %primals_4), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() 
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] stream0 = get_raw_stream(0) triton_poi_fused_index_put_lift_fresh_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] triton_poi_fused_index_put_lift_fresh_1.run(buf0, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, out=buf2) del primals_3 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, adj_1], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf0, buf3, 64, grid=grid(64), stream=stream0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf5) del buf2 buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] triton_poi_fused_add_4.run(buf6, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 return (buf6, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch.nn import Parameter import torch.utils.data def glorot(tensor): if tensor is not None: stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1))) tensor.data.uniform_(-stdv, stdv) def zeros(tensor): if tensor is not None: tensor.data.fill_(0) class DenseGCNConv(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GCNConv`. """ def __init__(self, in_channels, out_channels, improved=False, bias=True): super(DenseGCNConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.improved = improved self.weight = Parameter(torch.Tensor(self.in_channels, out_channels)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): glorot(self.weight) zeros(self.bias) def forward(self, x, adj, mask=None, add_loop=True): """ Args: x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times F}`, with batch-size :math:`B`, (maximum) number of nodes :math:`N` for each graph, and feature dimension :math:`F`. adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B \\times N \\times N}`. The adjacency tensor is broadcastable in the batch dimension, resulting in a shared adjacency matrix for the complete batch. mask (BoolTensor, optional): Mask matrix :math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating the valid nodes for each graph. (default: :obj:`None`) add_loop (bool, optional): If set to :obj:`False`, the layer will not automatically add self-loops to the adjacency matrices. (default: :obj:`True`) """ x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj B, N, _ = adj.size() if add_loop: adj = adj.clone() idx = torch.arange(N, dtype=torch.long, device=adj.device) adj[:, idx, idx] = 1 if not self.improved else 2 out = torch.matmul(x, self.weight) deg_inv_sqrt = adj.sum(dim=-1).clamp(min=1).pow(-0.5) adj = deg_inv_sqrt.unsqueeze(-1) * adj * deg_inv_sqrt.unsqueeze(-2) out = torch.matmul(adj, out) if self.bias is not None: out = out + self.bias if mask is not None: out = out * mask.view(B, N, 1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
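# How `adj[:, idx, idx] = 1` (the self-loop write above) shows up in the
# generated code: triton_poi_fused_index_put_lift_fresh_1 stores 1.0 at flat
# offsets 5 * x0 + 16 * x1, which is exactly the diagonal of each (4, 4)
# matrix, since element (b, i, i) of a tensor with strides (16, 4, 1) sits at
# 16 * b + 4 * i + i = 16 * b + 5 * i. A quick check (sketch only):
import torch

adj = torch.zeros(4, 4, 4)
flat = adj.view(-1)
for b in range(4):
    for i in range(4):
        flat[16 * b + 5 * i] = 1.0
assert torch.equal(adj, torch.eye(4).expand(4, 4, 4))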
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 tmp0 = 1.0 tl.store(out_ptr0 + (5 * x0 + 16 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + x4, xmask) tmp13 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = -0.5 tmp10 = libdevice.pow(tmp8, tmp9) tmp12 = tmp10 * tmp11 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tmp19 = tmp17 + tmp18 tmp20 = triton_helpers.maximum(tmp19, tmp7) tmp21 = libdevice.pow(tmp20, tmp9) tmp22 = tmp12 * tmp21 tl.store(out_ptr0 + x4, tmp22, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) 
assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_index_put_lift_fresh_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 triton_poi_fused_index_put_lift_fresh_1[grid(16)](buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, out=buf2) del primals_3 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(64)](buf0, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf5) del buf2 buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_4[grid(256)](buf6, primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 return buf6, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0) def glorot(tensor): if tensor is not None: stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1))) tensor.data.uniform_(-stdv, stdv) def zeros(tensor): if tensor is not None: tensor.data.fill_(0) class DenseGCNConvNew(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GCNConv`. """ def __init__(self, in_channels, out_channels, improved=False, bias=True): super(DenseGCNConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.improved = improved self.weight = Parameter(torch.Tensor(self.in_channels, out_channels)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): glorot(self.weight) zeros(self.bias) def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def forward(self, input_0, input_1): primals_3 = self.weight primals_4 = self.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
douglasrizzo/pytorch_geometric
DenseGCNConv
false
12292
[ "MIT" ]
0
effc617c6ad6daad506038bb79e4407082e74740
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
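A hedged cross-check for the record above: with shared parameters, the Inductor-compiled DenseGCNConvNew should reproduce the eager DenseGCNConv. This sketch assumes a CUDA device, since the generated kernels target cuda:0.

import torch

eager = DenseGCNConv(4, 4).cuda()
compiled = DenseGCNConvNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # same weight and bias
x, adj = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    ref = eager(x, adj)
    out = compiled(x, adj)
print(torch.allclose(ref, out, atol=1e-5))    # expected: True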
LayerNormLSTMCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/7e/c7edgnsiuilw7uzwau7radvkvvtmowm7d7uh56mczbhieiykfrnx.py # Topologically Sorted Source Nodes: [hx], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # hx => full_default # Graph fragment: # %full_default : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_0 = async_compile.triton('triton_poi_fused_new_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/2f/c2fkx6tbj26h4otrn2qrjtiwj3hyc22tyc25wwddmjpxxckktv6j.py # Topologically Sorted Source Nodes: [layer_norm, layer_norm_1, gates], Original ATen: [aten.native_layer_norm, aten.add] # Source node to ATen node mapping: # gates => add_4 # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # layer_norm_1 => add_2, add_3, mul_2, mul_3, rsqrt_1, sub_1, var_mean_1 # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%addmm, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%addmm_1, [1]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %getitem_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_8), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_9), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %add_3), kwargs = {}) triton_per_fused_add_native_layer_norm_1 = async_compile.triton('triton_per_fused_add_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32', 12: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_native_layer_norm_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 8, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_native_layer_norm_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp42 = tl.load(in_ptr2 + (r1), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr3 + (r1), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr4 + (r1), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr5 + (r1), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tmp29 / tmp9 tmp31 = tmp23 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp37 = tmp36 / tmp17 tmp38 = tmp37 + tmp19 tmp39 = libdevice.rsqrt(tmp38) tmp40 = tmp0 - tmp10 tmp41 = tmp40 * tmp21 tmp43 = tmp41 * tmp42 tmp45 = tmp43 + tmp44 tmp46 = tmp22 - tmp30 tmp47 = tmp46 * tmp39 tmp49 = tmp47 * tmp48 tmp51 = tmp49 + tmp50 tmp52 = tmp45 + tmp51 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp39, xmask) tl.store(out_ptr2 + (r1 + (16*x0)), tmp52, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) tl.store(out_ptr1 + (x0), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ms/cms6ctu6l3gsya7bcfoi6ljxkz24qfnsh4dphpozj76xxmuypt3i.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_2,), kwargs = {}) triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + (x2), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3p/c3pqbidnyc7ibhtbltkpvphbztgjerdojzgow6gd25qnvcpr6w7i.py # Topologically Sorted Source Nodes: [g, mul, mul_1, cy], Original ATen: [aten.tanh, aten.mul, aten.add] # Source node to ATen node mapping: # cy => add_5 # g => tanh # mul => mul_4 # mul_1 => mul_5 # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%slice_4,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_5, %full_default), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_4, %tanh), kwargs = {}) # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) triton_poi_fused_add_mul_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask) tmp4 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask) tmp1 = 0.0 tmp2 = tmp0 * tmp1 tmp5 = libdevice.tanh(tmp4) tmp6 = tmp3 * tmp5 tmp7 = tmp2 + tmp6 tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vp/cvpke7vqb5rcrpra7r6jl5ewicc4xljasp4hd4x25ttzukjdcria.py # Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_2 => add_6, rsqrt_2, var_mean_2 # Graph fragment: # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [1]), kwargs = {correction: 0, keepdim: True}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_7, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) triton_poi_fused_native_layer_norm_4 = async_compile.triton('triton_poi_fused_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * 
tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ub/cubfrt3yztsismm4xpw5raox6rdfhcpimoypwjc2xctqj25kkzdc.py # Topologically Sorted Source Nodes: [layer_norm_2, tanh_1, hy], Original ATen: [aten.native_layer_norm, aten.tanh, aten.mul] # Source node to ATen node mapping: # hy => mul_8 # layer_norm_2 => add_6, add_7, mul_6, mul_7, rsqrt_2, sub_2, var_mean_2 # tanh_1 => tanh_1 # Graph fragment: # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [1]), kwargs = {correction: 0, keepdim: True}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_7, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %getitem_8), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_2), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %primals_10), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %primals_11), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_7,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_6, %tanh_1), kwargs = {}) triton_poi_fused_mul_native_layer_norm_tanh_5 = async_compile.triton('triton_poi_fused_mul_native_layer_norm_tanh_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_native_layer_norm_tanh_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_native_layer_norm_tanh_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, 
in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = tmp3 * tmp4 tmp7 = tmp5 * tmp6 tmp9 = tmp7 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = tmp0 * tmp10 tl.store(out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (16, ), (1, )) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (16, ), (1, )) assert_size_stride(primals_9, (16, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hx], Original ATen: [aten.new_zeros] stream0 = get_raw_stream(0) triton_poi_fused_new_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_6 del primals_7 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0); del buf3 # reuse buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf8 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf10 = reinterpret_tensor(buf8, (4, 1), (1, 1), 0); del buf8 # reuse buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm, layer_norm_1, gates], Original ATen: [aten.native_layer_norm, aten.add] triton_per_fused_add_native_layer_norm_1.run(buf5, buf10, buf1, buf6, primals_4, primals_5, primals_8, primals_9, buf2, buf7, buf11, 4, 16, grid=grid(4), stream=stream0) buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_2.run(buf11, buf12, 48, grid=grid(48), stream=stream0) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [g, mul, mul_1, cy], Original ATen: [aten.tanh, aten.mul, aten.add] triton_poi_fused_add_mul_tanh_3.run(buf12, buf11, buf13, 16, grid=grid(16), stream=stream0) del buf11 buf14 = 
empty_strided_cuda((4, 1), (1, 4), torch.float32) buf15 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_4.run(buf13, buf14, buf15, 4, grid=grid(4), stream=stream0) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_2, tanh_1, hy], Original ATen: [aten.native_layer_norm, aten.tanh, aten.mul] triton_poi_fused_mul_native_layer_norm_tanh_5.run(buf12, buf13, buf14, buf15, primals_10, primals_11, buf16, 16, grid=grid(16), stream=stream0) del buf14 del buf15 return (buf16, buf13, primals_1, primals_4, primals_5, primals_8, primals_9, primals_10, primals_11, buf0, buf1, buf2, buf5, buf6, buf7, buf10, reinterpret_tensor(buf12, (4, 4), (12, 1), 0), reinterpret_tensor(buf12, (4, 4), (12, 1), 8), buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.distributed import torch import torch.nn as nn import torch.nn.functional as F class LayerNormLSTMCell(nn.LSTMCell): def __init__(self, input_size, hidden_size, bias=True): super().__init__(input_size, hidden_size, bias) self.ln_ih = nn.LayerNorm(4 * hidden_size) self.ln_hh = nn.LayerNorm(4 * hidden_size) self.ln_ho = nn.LayerNorm(hidden_size) def forward(self, input, hidden=None): if hidden is None: hx = input.new_zeros(input.size(0), self.hidden_size, requires_grad=False) cx = input.new_zeros(input.size(0), self.hidden_size, requires_grad=False) else: hx, cx = hidden gates = self.ln_ih(F.linear(input, self.weight_ih, self.bias_ih) ) + self.ln_hh(F.linear(hx, self.weight_hh, self.bias_hh)) i, f, o = gates[:, :3 * self.hidden_size].sigmoid().chunk(3, 1) g = gates[:, 3 * self.hidden_size:].tanh() cy = f * cx + i * g hy = o * self.ln_ho(cy).tanh() return hy, cy def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
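A short usage sketch for the eager cell above (names from this record): when no hidden state is passed, hx and cx default to zeros, which is exactly what the fused new_zeros kernel materializes in the compiled version.

x = get_inputs()[0]                               # (4, 4) input batch
cell = LayerNormLSTMCell(**get_init_inputs()[1])  # input_size=4, hidden_size=4
hy, cy = cell(x)                                  # hidden defaults to zeros
print(hy.shape, cy.shape)                         # torch.Size([4, 4]) torch.Size([4, 4])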
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.distributed import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_per_fused_add_native_layer_norm_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp42 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr4 + r1, None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr5 + r1, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tmp29 / tmp9 tmp31 = tmp23 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp37 = tmp36 / tmp17 tmp38 = tmp37 + tmp19 tmp39 = libdevice.rsqrt(tmp38) tmp40 = tmp0 - tmp10 tmp41 = tmp40 * tmp21 tmp43 = tmp41 * tmp42 tmp45 = tmp43 + tmp44 tmp46 = tmp22 - tmp30 tmp47 = tmp46 * tmp39 tmp49 = tmp47 * tmp48 tmp51 = tmp49 + tmp50 tmp52 = tmp45 + tmp51 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp39, xmask) tl.store(out_ptr2 + (r1 + 16 * x0), tmp52, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp30, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + x2, tmp1, xmask) @triton.jit def triton_poi_fused_add_mul_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): 
xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp3 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp4 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp1 = 0.0 tmp2 = tmp0 * tmp1 tmp5 = libdevice.tanh(tmp4) tmp6 = tmp3 * tmp5 tmp7 = tmp2 + tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_mul_native_layer_norm_tanh_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = tmp3 * tmp4 tmp7 = tmp5 * tmp6 tmp9 = tmp7 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = tmp0 * tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor( primals_2, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf6) del 
primals_6 del primals_7 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0) del buf3 buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf8 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf10 = reinterpret_tensor(buf8, (4, 1), (1, 1), 0) del buf8 buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_per_fused_add_native_layer_norm_1[grid(4)](buf5, buf10, buf1, buf6, primals_4, primals_5, primals_8, primals_9, buf2, buf7, buf11, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32) triton_poi_fused_sigmoid_2[grid(48)](buf11, buf12, 48, XBLOCK=64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_tanh_3[grid(16)](buf12, buf11, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf11 buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf15 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_native_layer_norm_4[grid(4)](buf13, buf14, buf15, 4, XBLOCK=4, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_native_layer_norm_tanh_5[grid(16)](buf12, buf13, buf14, buf15, primals_10, primals_11, buf16, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf14 del buf15 return (buf16, buf13, primals_1, primals_4, primals_5, primals_8, primals_9, primals_10, primals_11, buf0, buf1, buf2, buf5, buf6, buf7, buf10, reinterpret_tensor(buf12, (4, 4), (12, 1), 0), reinterpret_tensor(buf12, (4, 4), (12, 1), 8), buf13) class LayerNormLSTMCellNew(nn.LSTMCell): def __init__(self, input_size, hidden_size, bias=True): super().__init__(input_size, hidden_size, bias) self.ln_ih = nn.LayerNorm(4 * hidden_size) self.ln_hh = nn.LayerNorm(4 * hidden_size) self.ln_ho = nn.LayerNorm(hidden_size) def forward(self, input_0): primals_2 = self.weight_ih primals_6 = self.weight_hh primals_3 = self.bias_ih primals_4 = self.bias_hh primals_5 = self.ln_ih.weight primals_7 = self.ln_ih.bias primals_8 = self.ln_hh.weight primals_9 = self.ln_hh.bias primals_10 = self.ln_ho.weight primals_11 = self.ln_ho.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1]
dimoteo333/TL-DR
LayerNormLSTMCell
false
12293
[ "Apache-2.0" ]
0
b3bebc51e70a48294d7762fa73375cf1bf2ff068
https://github.com/dimoteo333/TL-DR/tree/b3bebc51e70a48294d7762fa73375cf1bf2ff068
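Hedged sketch for the compiled cell above, assuming cuda:0 is available: its call() returns hy and cy first, followed by tensors kept for the backward pass, and LayerNormLSTMCellNew.forward surfaces only the first two.

import torch

cell = LayerNormLSTMCellNew(4, 4).cuda()
hy, cy = cell(torch.rand(4, 4, device='cuda'))
print(hy.shape, cy.shape)  # torch.Size([4, 4]) torch.Size([4, 4])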
Linear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/bh/cbhpcgxjg3mwo4dulstw5ie26none2yzi5sysdzl34cu6pyah4fg.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.view] # Source node to ATen node mapping: # out_1 => add, view_3 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {}) triton_poi_fused_add_view_0 = async_compile.triton('triton_poi_fused_add_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = 
tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.view] stream0 = get_raw_stream(0) triton_poi_fused_add_view_0.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
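A hedged sketch of the grouped branch of the eager Linear above, which the compiled graph below does not cover (it was traced with groups=1); names come from this record.

import torch

lin = Linear(4, 4, groups=2)       # weight: (2, 2, 2), two groups of 2 -> 2
out = lin(torch.rand(4, 4, 4, 4))  # input is viewed as (-1, 2, 2) per group
print(out.shape)                   # torch.Size([4, 4, 4, 4])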
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import Tensor from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_view_0[grid(256)](buf2, primals_3, 256, XBLOCK =256, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class LinearNew(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(LinearNew, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
douglasrizzo/pytorch_geometric
Linear
false
12294
[ "MIT" ]
0
effc617c6ad6daad506038bb79e4407082e74740
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
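Illustrative direct invocation of the call() defined in this record's compiled code, assuming cuda:0: per its asserts, the arguments are weight (1, 4, 4), input (4, 4, 4, 4), and bias (4,), and the second return value is the input reinterpreted as (4, 64) for the backward pass.

import torch

w = torch.randn(1, 4, 4, device='cuda')
x = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.randn(4, device='cuda')
out, x_t = call([w, x, b])         # call() empties the argument list it receives
print(out.shape, x_t.shape)        # torch.Size([4, 4, 4, 4]) torch.Size([4, 64])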
DenseSAGEConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ia/ciadpx4ik7ocmsqscox73ya2qxek7ekkw25w6rijleysqp7lfliw.py # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # setitem => full_default, index_put # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put.default](args = (%primals_2, [None, %iota, %iota], %full_default), kwargs = {}) triton_poi_fused_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3a/c3ajks6gn2yuzdnbqm5gdhl6nslpowgffuevr4gjguhn4q6d6v4b.py # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # setitem => full_default, index_put # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put.default](args = (%primals_2, [None, %iota, %iota], %full_default), kwargs = {}) triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) tmp0 = 1.0 tl.store(out_ptr0 + ((5*x0) + (16*x1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/y3/cy35gbcd6bgnd5eyw3uhjdcrbocdx6nthwpvgxcbrze5uqo6n2dp.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] # Source node to ATen node mapping: # out => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/f7/cf7frvp5h5tfdj7h2wqm2oydwprwzs74er32f2mngowdmgfjqre4.py # Topologically Sorted Source Nodes: [sum_1, clamp, out_1], Original ATen: [aten.sum, aten.clamp, aten.div] # Source node to ATen node mapping: # clamp => clamp_min # out_1 => div # sum_1 => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%index_put, [-1], True), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_1, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %clamp_min), kwargs = {}) triton_poi_fused_clamp_div_sum_3 = async_compile.triton('triton_poi_fused_clamp_div_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_sum_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_sum_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 1.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp0 / tmp9 tl.store(in_out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5a/c5ainvzw5uinosmtjpfsqml2wdleittg5twoainmhbtwdqayr4sw.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] # Source node to ATen node mapping: # out_3 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %primals_4), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted 
Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] stream0 = get_raw_stream(0) triton_poi_fused_index_put_lift_fresh_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] triton_poi_fused_index_put_lift_fresh_1.run(buf0, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf0, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf3) del primals_1 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [sum_1, clamp, out_1], Original ATen: [aten.sum, aten.clamp, aten.div] triton_poi_fused_clamp_div_sum_3.run(buf4, buf0, 256, grid=grid(256), stream=stream0) del buf0 buf5 = reinterpret_tensor(buf2, (64, 4), (4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_3, out=buf5) del primals_3 buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] triton_poi_fused_add_4.run(buf6, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 return (buf6, reinterpret_tensor(buf4, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
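A note on the self-loop lowering above: the eager adj[:, idx, idx] = 1 compiles to two pointwise kernels. triton_poi_fused_index_put_lift_fresh_0 copies all 64 elements of adj into a fresh buffer (the clone), and triton_poi_fused_index_put_lift_fresh_1 then writes 1.0 at flattened offsets 5*x0 + 16*x1. For a contiguous (4, 4, 4) tensor with strides (16, 4, 1), element [b, i, i] sits at 16*b + 4*i + i = 16*b + 5*i, so a stride of N + 1 = 5 walks the diagonal of each batch slice. A minimal sketch of that index arithmetic, with shapes hard-coded to B = N = 4 as in this record:

import torch

B, N = 4, 4
adj = torch.zeros(B, N, N)
flat = adj.view(-1)                     # contiguous, strides (16, 4, 1)
for x1 in range(B):                     # batch index in the kernel
    for x0 in range(N):                 # diagonal position in the kernel
        flat[5 * x0 + 16 * x1] = 1.0    # stride N + 1 hits element [x1, x0, x0]
ref = torch.zeros(B, N, N)
idx = torch.arange(N)
ref[:, idx, idx] = 1.0
assert torch.equal(adj, ref)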
import math import torch import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) class DenseSAGEConv(torch.nn.Module): """See :class:`torch_geometric.nn.conv.SAGEConv`. """ def __init__(self, in_channels, out_channels, normalize=False, bias=True): super(DenseSAGEConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.weight = Parameter(torch.Tensor(self.in_channels, out_channels)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) uniform(self.in_channels, self.bias) def forward(self, x, adj, mask=None, add_loop=True): """ Args: x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times F}`, with batch-size :math:`B`, (maximum) number of nodes :math:`N` for each graph, and feature dimension :math:`F`. adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B \\times N \\times N}`. The adjacency tensor is broadcastable in the batch dimension, resulting in a shared adjacency matrix for the complete batch. mask (BoolTensor, optional): Mask matrix :math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating the valid nodes for each graph. (default: :obj:`None`) add_loop (bool, optional): If set to :obj:`False`, the layer will not automatically add self-loops to the adjacency matrices. (default: :obj:`True`) """ x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj B, N, _ = adj.size() if add_loop: adj = adj.clone() idx = torch.arange(N, dtype=torch.long, device=adj.device) adj[:, idx, idx] = 1 out = torch.matmul(adj, x) out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1) out = torch.matmul(out, self.weight) if self.bias is not None: out = out + self.bias if self.normalize: out = F.normalize(out, p=2, dim=-1) if mask is not None: out = out * mask.view(B, N, 1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
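For context, a minimal usage sketch of the eager module above, using the same shapes as get_inputs and get_init_inputs (CPU, random weights; not part of the original repo):

import torch

torch.manual_seed(0)
layer = DenseSAGEConv(in_channels=4, out_channels=4)
x = torch.rand(4, 4, 4, 4)    # node features; broadcasts against adj in matmul
adj = torch.rand(4, 4, 4)     # (B, N, N) adjacency shared across the batch
out = layer(x, adj)           # add_loop=True fills the diagonal with ones
print(out.shape)              # torch.Size([4, 4, 4, 4])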
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 tmp0 = 1.0 tl.store(out_ptr0 + (5 * x0 + 16 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clamp_div_sum_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 1.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp0 / tmp9 tl.store(in_out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_index_put_lift_fresh_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 triton_poi_fused_index_put_lift_fresh_1[grid(16)](buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(256)](buf0, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) 
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf3) del primals_1 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_clamp_div_sum_3[grid(256)](buf4, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del buf0 buf5 = reinterpret_tensor(buf2, (64, 4), (4, 1), 0) del buf2 extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_3, out=buf5) del primals_3 buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_4[grid(256)](buf6, primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 return buf6, reinterpret_tensor(buf4, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) class DenseSAGEConvNew(torch.nn.Module): """See :class:`torch_geometric.nn.conv.SAGEConv`. """ def __init__(self, in_channels, out_channels, normalize=False, bias=True): super(DenseSAGEConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.weight = Parameter(torch.Tensor(self.in_channels, out_channels)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) uniform(self.in_channels, self.bias) def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def forward(self, input_0, input_1): primals_3 = self.weight primals_4 = self.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
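A quick parity sketch between the eager DenseSAGEConv and the compiled DenseSAGEConvNew above (assumes a CUDA device and that both class definitions are in scope; the tolerance is illustrative, not taken from the dataset):

import torch

torch.manual_seed(0)
ref = DenseSAGEConv(4, 4).cuda()
new = DenseSAGEConvNew(4, 4).cuda()
new.load_state_dict(ref.state_dict())       # share weight and bias
x = torch.rand(4, 4, 4, 4, device='cuda')
adj = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(ref(x, adj), new(x, adj), atol=1e-6)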
douglasrizzo/pytorch_geometric
DenseSAGEConv
false
12,295
[ "MIT" ]
0
effc617c6ad6daad506038bb79e4407082e74740
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
Gate
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3l/c3lo77c7wjxasxrhtr6wesb72ods2d2rxnxhbfieun7j2wukm3wn.py # Topologically Sorted Source Nodes: [feature], Original ATen: [aten.cat] # Source node to ATen node mapping: # feature => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zr/czrkkoqvumj2d26qj2buq7wdrlw2symgssdquptsm2kn4svsp3lz.py # Topologically Sorted Source Nodes: [att, gate_score, gate_score_1, mul, sub, mul_1, add], Original ATen: [aten.tanh, aten.sigmoid, aten.repeat, aten.mul, aten.rsub, aten.add] # Source node to ATen node mapping: # add => add # att => tanh # gate_score => sigmoid # gate_score_1 => repeat # mul => mul # mul_1 => mul_1 # sub => sub # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%tanh,), kwargs = {}) # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%sigmoid, [1, 1, 4]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%repeat, %primals_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %repeat), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_repeat_rsub_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_repeat_rsub_sigmoid_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_repeat_rsub_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_repeat_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp7 = tl.load(in_ptr2 + (x2), 
xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp2 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 8), (8, 1)) assert_size_stride(primals_4, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [feature], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 128, grid=grid(128), stream=stream0) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_3 del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att, gate_score, gate_score_1, mul, sub, mul_1, add], Original ATen: [aten.tanh, aten.sigmoid, aten.repeat, aten.mul, aten.rsub, aten.add] triton_poi_fused_add_mul_repeat_rsub_sigmoid_tanh_1.run(buf2, primals_1, primals_2, buf3, 64, grid=grid(64), stream=stream0) return (buf3, primals_1, primals_2, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
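One detail worth noting in the fused kernel above (a reading of the generated code, with d_model hard-coded to 4): the gate logit is loaded per position (x1 = xindex // 4) and reused across the four feature lanes, so the eager repeat(1, 1, d_model) is never materialized; plain broadcasting computes the same result. A minimal sketch:

import torch

torch.manual_seed(0)
g = torch.sigmoid(torch.tanh(torch.rand(4, 4, 1)))   # per-position gate score
a, b = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
eager = g.repeat(1, 1, 4) * a + (1 - g.repeat(1, 1, 4)) * b
fused = g * a + (1 - g) * b                          # broadcast, as in the kernel
assert torch.allclose(eager, fused)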
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class Gate(nn.Module): def __init__(self, args): super(Gate, self).__init__() self.d_model = args.d_model self.weight_proj = nn.Linear(2 * self.d_model, 1) self.tanh = nn.Tanh() def forward(self, featureA, featureB): feature = torch.cat([featureA, featureB], dim=-1) att = self.tanh(self.weight_proj(feature)) gate_score = torch.sigmoid(att) gate_score = gate_score.repeat(1, 1, self.d_model) return gate_score * featureA + (1 - gate_score) * featureB def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'args': _mock_config(d_model=4)}]
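A minimal usage sketch for the module above; types.SimpleNamespace stands in for the repo's _mock_config helper (a portability substitution, not the original API):

import types
import torch

torch.manual_seed(0)
args = types.SimpleNamespace(d_model=4)    # mirrors get_init_inputs
gate = Gate(args)
out = gate(torch.rand(4, 4, 4), torch.rand(4, 4, 4))
print(out.shape)                           # torch.Size([4, 4, 4])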
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_repeat_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp7 = tl.load(in_ptr2 + x2, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp2 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 8), (8, 1)) assert_size_stride(primals_4, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_3, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_3 del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_repeat_rsub_sigmoid_tanh_1[grid(64)](buf2, primals_1, primals_2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf3, primals_1, primals_2, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2 class GateNew(nn.Module): def __init__(self, args): super(GateNew, self).__init__() self.d_model = args.d_model self.weight_proj = nn.Linear(2 * self.d_model, 1) self.tanh = nn.Tanh() def forward(self, input_0, input_1): primals_3 = self.weight_proj.weight primals_4 = self.weight_proj.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
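As with the previous record, a parity sketch between Gate and GateNew (CUDA assumed; SimpleNamespace again stands in for _mock_config; tolerance is illustrative):

import types
import torch

torch.manual_seed(0)
args = types.SimpleNamespace(d_model=4)
ref = Gate(args).cuda()
new = GateNew(args).cuda()
new.load_state_dict(ref.state_dict())    # share the projection weights
a = torch.rand(4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(ref(a, b), new(a, b), atol=1e-6)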
djz233/GraphMask
Gate
false
12,296
[ "MIT" ]
0
4b699a1685f0d26973bb90cd75b09d74726cdc2f
https://github.com/djz233/GraphMask/tree/4b699a1685f0d26973bb90cd75b09d74726cdc2f
TransformerDecoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/dc/cdcelrvxxp73pk3p36hgarqnpiqa2vcqisv3mmwsj7xdk4jhe23l.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 
tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = 
async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/td/ctdoyra7pmasdmivlvv5olwepexmvwwkjtuh6bfidfxy6ttrdr3j.py # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # tgt => add # tgt_1 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %squeeze), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [1]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/lu/clufgh66u6osl7ktnv2jqtrxa5qqz4ubgudpm47ibyb5ojwqsyt3.py # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # tgt => add # tgt_1 => add_1, add_2, mul_1, mul_2, rsqrt, sub_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %squeeze), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_7), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_7), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_8), kwargs = {}) triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/v4/cv4ko24f26un3axamp426zbnugqu4jhirnvlxjjqhipcugzzgcbk.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_10), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ti/ctiyfh2c3cvlhay74obhe7nkxxxkzaih2gbio235oodbwfbvwzit.py # Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add] # Source node to ATen node mapping: # tgt_2 => add_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_12), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_tensor), kwargs = {}) triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/a4/ca4uo2wxiyozqosox66kdczthghpe6qlxcx3xgdhcs2i6efpvbaa.py # Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # tgt_3 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_7 = async_compile.triton('triton_poi_fused_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/z2/cz2x6uoruxdkowfiawvku3nxpx4vgbvkygpozaunzozrsmzk6q75.py # Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # tgt_3 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1 # Graph fragment: # 
%var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_9), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_13), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_14), kwargs = {}) triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (12, 4), (4, 1)) 
assert_size_stride(primals_4, (12, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_4, (4, ), (1, ), 4), primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_4, (4, ), (1, ), 8), primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_3 buf3 = reinterpret_tensor(buf0, (1, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf3, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf4 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (1, 4, 4), (4, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (1, 4, 4), (4, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_6 buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_3.run(primals_2, buf8, buf9, buf10, 4, grid=grid(4), stream=stream0) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_4.run(primals_2, buf8, buf9, buf10, primals_7, primals_8, buf11, 16, grid=grid(16), stream=stream0) del primals_8 
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf11, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] triton_poi_fused_relu_5.run(buf13, primals_10, 16, grid=grid(16), stream=stream0) del primals_10 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf13, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf14) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add] triton_poi_fused_add_6.run(buf15, buf11, primals_12, 16, grid=grid(16), stream=stream0) del primals_12 buf16 = buf9; del buf9 # reuse buf17 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_7.run(buf15, buf16, buf17, 4, grid=grid(4), stream=stream0) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_8.run(buf15, buf16, buf17, primals_13, primals_14, buf18, 16, grid=grid(16), stream=stream0) del buf16 del buf17 del primals_14 return (buf18, primals_2, primals_7, primals_13, primals_1, buf6, reinterpret_tensor(buf7, (4, 4), (4, 1), 0), buf8, buf11, buf13, buf15, primals_11, primals_9, primals_5, reinterpret_tensor(buf2, (1, 4, 4), (4, 1, 4), 0), reinterpret_tensor(buf3, (1, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (1, 4, 4), (4, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch._utils


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == 'relu':
        return F.relu
    if activation == 'gelu':
        return F.gelu
    if activation == 'glu':
        return F.glu
    raise RuntimeError(
        f'activation should be relu/gelu/glu, not {activation}.')


class TransformerDecoderLayer(nn.Module):

    def __init__(self, d_model, dim_feedforward=None, nhead=1, dropout=0.1,
        activation='relu'):
        super().__init__()
        if dim_feedforward is None:
            dim_feedforward = d_model
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead,
            dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)

    def forward(self, query, memory, position_embedding=None):
        if position_embedding is not None:
            query = torch.cat([query, position_embedding.flatten(2).
                permute(1, 0, 2)], dim=0)
            memory = torch.cat([memory, position_embedding.flatten(2).
                permute(1, 0, 2)], dim=0)
        tgt = self.multihead_attn(query=query, key=memory, value=memory)[0]
        tgt = memory + self.dropout1(tgt)
        tgt = self.norm1(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        return tgt


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4}]
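# --- Editorial usage sketch (not part of the original repo). Assumes a
# PyTorch version that accepts unbatched (L, E) inputs to
# nn.MultiheadAttention; with d_model=4 the 4x4 inputs act as a length-4
# sequence of 4-dim tokens, matching the layout the compiled call() uses.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    layer = TransformerDecoderLayer(*init_args, **init_kwargs)
    layer.eval()  # disable dropout so the output is deterministic
    query, memory = get_inputs()
    with torch.no_grad():
        out = layer(query, memory)
    print(out.shape)  # torch.Size([4, 4])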
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.nn import functional as F import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = 
tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (12, 4), (4, 1)) assert_size_stride(primals_4, (12,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_4, (4,), (1,), 4), primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_4, (4,), (1,), 8), primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_3 buf3 = reinterpret_tensor(buf0, (1, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](buf3, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (1, 4, 4), (4, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf5 del buf5 extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (1, 4, 4), (4, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf8) del primals_6 buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(4)](primals_2, buf8, buf9, buf10, 4, XBLOCK=4, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(16)](primals_2, buf8, buf9, buf10, primals_7, primals_8, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf11, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12) buf13 = buf12 del buf12 triton_poi_fused_relu_5[grid(16)](buf13, primals_10, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_10 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
        extern_kernels.mm(buf13, reinterpret_tensor(primals_11, (4, 4), (1,
            4), 0), out=buf14)
        buf15 = buf14
        del buf14
        triton_poi_fused_add_6[grid(16)](buf15, buf11, primals_12, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_12
        buf16 = buf9
        del buf9
        buf17 = buf10
        del buf10
        triton_poi_fused_native_layer_norm_7[grid(4)](buf15, buf16, buf17,
            4, XBLOCK=4, num_warps=1, num_stages=1)
        buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_8[grid(16)](buf15, buf16, buf17,
            primals_13, primals_14, buf18, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del buf16
        del buf17
        del primals_14
    return (buf18, primals_2, primals_7, primals_13, primals_1, buf6,
        reinterpret_tensor(buf7, (4, 4), (4, 1), 0), buf8, buf11, buf13,
        buf15, primals_11, primals_9, primals_5, reinterpret_tensor(buf2, (
        1, 4, 4), (4, 1, 4), 0), reinterpret_tensor(buf3, (1, 4, 4), (16,
        1, 4), 0), reinterpret_tensor(buf1, (1, 4, 4), (4, 4, 1), 0))


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == 'relu':
        return F.relu
    if activation == 'gelu':
        return F.gelu
    if activation == 'glu':
        return F.glu
    raise RuntimeError(
        f'activation should be relu/gelu/glu, not {activation}.')


class TransformerDecoderLayerNew(nn.Module):

    def __init__(self, d_model, dim_feedforward=None, nhead=1, dropout=0.1,
        activation='relu'):
        super().__init__()
        if dim_feedforward is None:
            dim_feedforward = d_model
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead,
            dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)

    def forward(self, input_0, input_1):
        # Map inputs and parameters to the roles each primal actually plays
        # inside call(): primals_1/primals_2 are the query/memory inputs,
        # and primals_3..primals_14 follow the order the kernels consume
        # them (in-proj, out-proj, norm1, linear1, linear2, norm2).
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = self.multihead_attn.in_proj_weight
        primals_4 = self.multihead_attn.in_proj_bias
        primals_5 = self.multihead_attn.out_proj.weight
        primals_6 = self.multihead_attn.out_proj.bias
        primals_7 = self.norm1.weight
        primals_8 = self.norm1.bias
        primals_9 = self.linear1.weight
        primals_10 = self.linear1.bias
        primals_11 = self.linear2.weight
        primals_12 = self.linear2.bias
        primals_13 = self.norm2.weight
        primals_14 = self.norm2.bias
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14])
        return output[0]
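# --- Editorial parity sketch (an added example; assumes a CUDA device and
# that the eager TransformerDecoderLayer and get_inputs() from the
# python_code field above are in scope): with dropout disabled via eval()
# and shared weights, the Triton-backed module should reproduce the eager
# output up to float tolerance.
if __name__ == '__main__':
    torch.manual_seed(0)
    eager = TransformerDecoderLayer(d_model=4).cuda().eval()
    fused = TransformerDecoderLayerNew(d_model=4).cuda().eval()
    fused.load_state_dict(eager.state_dict())  # identical submodule names
    query, memory = [t.cuda() for t in get_inputs()]
    with torch.no_grad():
        ref = eager(query, memory)
        out = fused(query, memory)
    print(torch.allclose(ref, out, atol=1e-5))  # expected: True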
dingmyu/mmclassification
TransformerDecoderLayer
false
12297
[ "Apache-2.0" ]
0
c600b22907fb9423899f7c308c659168c2d01cd8
https://github.com/dingmyu/mmclassification/tree/c600b22907fb9423899f7c308c659168c2d01cd8
Envelope
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/og/coge23wv22hz2aekdz4xkqtdej7ozgwlhpudo45mlboyea3tskh4.py # Topologically Sorted Source Nodes: [truediv, x_pow_p0, mul_1, add, x_pow_p1, mul_2, add_1, mul_3, mul_4, add_2], Original ATen: [aten.reciprocal, aten.mul, aten.pow, aten.add] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # mul_1 => mul_2 # mul_2 => mul_3 # mul_3 => mul_4 # mul_4 => mul_5 # truediv => mul_1, reciprocal # x_pow_p0 => pow_1 # x_pow_p1 => mul # Graph fragment: # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%arg0_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -15.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %arg0_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 24), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, -10.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %arg0_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_5), kwargs = {}) triton_poi_fused_add_mul_pow_reciprocal_0 = async_compile.triton('triton_poi_fused_add_mul_pow_reciprocal_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_reciprocal_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp0 * tmp0 tmp6 = tmp5 * tmp5 tmp7 = -15.0 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tmp10 = tmp6 * tmp0 tmp11 = 24.0 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = -10.0 tmp15 = tmp10 * tmp14 tmp16 = tmp15 * tmp0 tmp17 = tmp13 + tmp16 tl.store(out_ptr0 + (x0), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv, x_pow_p0, mul_1, add, x_pow_p1, mul_2, add_1, mul_3, mul_4, add_2], Original ATen: [aten.reciprocal, aten.mul, aten.pow, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_pow_reciprocal_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data class Envelope(torch.nn.Module): def __init__(self, exponent): super(Envelope, self).__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, x): p, a, b, c = self.p, self.a, self.b, self.c x_pow_p0 = x.pow(p) x_pow_p1 = x_pow_p0 * x return 1.0 / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p1 * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'exponent': 4}]
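# --- Editorial check (an added sketch, not repo code): for exponent p = 4
# the closed-form coefficients reduce to a = -(5*6)/2 = -15, b = 4*6 = 24,
# c = -(4*5)/2 = -10, exactly the constants baked into the fused kernel
# above (-15.0, 24.0, -10.0).
if __name__ == '__main__':
    env = Envelope(exponent=4)
    print(env.a, env.b, env.c)  # -15.0 24 -10.0
    x = torch.rand(4, 4, 4, 4) + 0.1  # stay away from 0: the 1/x term blows up
    expected = 1.0 / x + env.a * x ** 4 + env.b * x ** 5 + env.c * x ** 6
    assert torch.allclose(env(x), expected)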
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_pow_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp0 * tmp0 tmp6 = tmp5 * tmp5 tmp7 = -15.0 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tmp10 = tmp6 * tmp0 tmp11 = 24.0 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = -10.0 tmp15 = tmp10 * tmp14 tmp16 = tmp15 * tmp0 tmp17 = tmp13 + tmp16 tl.store(out_ptr0 + x0, tmp17, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class EnvelopeNew(torch.nn.Module): def __init__(self, exponent): super(EnvelopeNew, self).__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
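# --- Editorial usage sketch (assumes a CUDA device): the compiled variant
# evaluates the whole polynomial in the single pointwise kernel above, so
# one launch replaces the eager chain of reciprocal/pow/mul/add ops.
if __name__ == '__main__':
    env = EnvelopeNew(exponent=4)
    x = torch.rand(4, 4, 4, 4, device='cuda') + 0.1
    print(env(x).shape)  # torch.Size([4, 4, 4, 4])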
douglasrizzo/pytorch_geometric
Envelope
false
12298
[ "MIT" ]
0
effc617c6ad6daad506038bb79e4407082e74740
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
CategoricalSampler
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/c5/cc5hiyrqfmfxmipscxrurz47qz3x3e4v7b3qrmfw4clinzd5btca.py # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] # Source node to ATen node mapping: # exp => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_exp_0 = async_compile.triton('triton_poi_fused_exp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) 
with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_exp_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 # Topologically Sorted Source Nodes: [exp, multinomial], Original ATen: [aten.exp, aten.multinomial] buf1 = torch.ops.aten.multinomial.default(buf0, 1) del buf0 buf2 = buf1 del buf1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Sampler(nn.Module):
    """
    args:
        logits: (batch, n_nodes)
    returns:
        next_node: (batch, 1)
    TopKSampler <=> greedy: pick the node with the highest probability.
    CategoricalSampler <=> sampling: draw a node at random from the
    categorical distribution defined by the probabilities.
    """

    def __init__(self, n_samples=1, **kwargs):
        super().__init__(**kwargs)
        self.n_samples = n_samples


class CategoricalSampler(Sampler):

    def forward(self, logits):
        return torch.multinomial(logits.exp(), self.n_samples)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
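# --- Editorial note (added sketch): torch.multinomial renormalizes each
# weight row, so drawing from logits.exp() is equivalent to sampling from
# softmax(logits), i.e. to torch.distributions.Categorical(logits=logits).
if __name__ == '__main__':
    torch.manual_seed(0)
    sampler = CategoricalSampler()
    next_node = sampler(torch.rand(4, 4))
    print(next_node.shape)  # torch.Size([4, 1]) -- one node index per row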
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Element-wise exp over the 4x4 logits; the multinomial draw itself is
    # delegated to the ATen op inside call() below.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.exp(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg0_1
        buf1 = torch.ops.aten.multinomial.default(buf0, 1)
        del buf0
        buf2 = buf1
        del buf1
    return buf2,


class Sampler(nn.Module):
    """
    args:
        logits: (batch, n_nodes)
    returns:
        next_node: (batch, 1)
    TopKSampler <=> greedy: pick the node with the highest probability.
    CategoricalSampler <=> sampling: draw a node at random from the
    categorical distribution defined by the probabilities.
    """

    def __init__(self, n_samples=1, **kwargs):
        super().__init__(**kwargs)
        self.n_samples = n_samples


class CategoricalSamplerNew(Sampler):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
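# --- Editorial sketch (assumes a CUDA device): in the compiled variant only
# the element-wise exp runs as a Triton kernel; the draw itself still
# dispatches to torch.ops.aten.multinomial, as call() above shows.
if __name__ == '__main__':
    sampler = CategoricalSamplerNew()
    print(sampler(torch.rand(4, 4, device='cuda')).shape)  # torch.Size([4, 1])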
daunfamily/VRP_MHA
CategoricalSampler
false
12299
[ "MIT" ]
0
9c23d181d11dbbacac01299c6e8931b8e266b9b4
https://github.com/daunfamily/VRP_MHA/tree/9c23d181d11dbbacac01299c6e8931b8e266b9b4
Attention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/xl/cxlpplg3hmt6k4x6alhg4yn6eq5jppxhzrzdcvvcpbupy7pjgudn.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp] # Source node to ATen node mapping: # max_1 => max_1 # out => exp # score_1 => div # src_max => clamp_min # sub => sub # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %clamp_min), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused_clamp_div_exp_max_sub_0 = async_compile.triton('triton_poi_fused_clamp_div_exp_max_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_exp_max_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/fi/cfijnjpiz4ruggqhl6zhj4ujuexfsuzxvpo26muzj4bggik4i5hl.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] # Source node to ATen node mapping: # add => add # exp_1 => exp_1 # max_1 => max_1 # score_1 => div # src_max => clamp_min # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {}) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/h7/ch7xj7aj6agx6frik7qd7tffe2pmrsjjensiwx2hy2md4kde7aj5.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_1], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] # Source node to ATen node mapping: # add => add # exp_1 => exp_1 # max_1 => max_1 # out_1 => div_1 # score_1 => div # src_max => clamp_min # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %add), kwargs = {}) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_exp_max_sub_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1.run(buf1, buf0, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_1], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2.run(buf3, buf2, 256, grid=grid(256), stream=stream0) del buf2 buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4) del arg2_1 del buf3 return 
(reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn.functional as F import torch.utils.data def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class Attention(torch.nn.Module): def __init__(self, dropout=0): super(Attention, self).__init__() self.dropout = dropout def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) assert key.size(-2) == value.size(-2) score = torch.matmul(query, key.transpose(-2, -1)) score = score / math.sqrt(key.size(-1)) score = restricted_softmax(score, dim=-1) score = F.dropout(score, p=self.dropout, training=self.training) return torch.matmul(score, value) def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
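# --- Editorial sketch (added, not repo code): restricted_softmax is an
# ordinary softmax over the scores augmented with one implicit extra logit
# fixed at `margin` (0 by default), so each row of weights sums to at most 1
# and the deficit acts like attention to a null slot.
if __name__ == '__main__':
    src = torch.randn(2, 4)
    full = torch.softmax(torch.cat([src, torch.zeros(2, 1)], dim=-1), dim=-1)
    assert torch.allclose(restricted_softmax(src), full[:, :4])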
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_exp_max_sub_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1[grid(64)](buf1, buf0, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2[grid(256)](buf3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 buf4 = buf0 del buf0 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4 ) del arg2_1 del buf3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class AttentionNew(torch.nn.Module): def __init__(self, dropout=0): super(AttentionNew, self).__init__() self.dropout = dropout def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
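A minimal parity sketch between the eager Attention and the compiled AttentionNew above (illustrative only; it assumes a CUDA device, since call() pins cuda:0, and uses dropout=0 so both paths are deterministic):

import torch

q, k, v = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3)]
ref = Attention(dropout=0)(q, k, v)         # eager reference module
out = AttentionNew(dropout=0)(q, k, v)      # Inductor-generated kernels
print(torch.allclose(ref, out, atol=1e-6))  # expected: True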
douglasrizzo/pytorch_geometric
Attention
false
12300
[ "MIT" ]
0
effc617c6ad6daad506038bb79e4407082e74740
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
Model
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/nq/cnqjufcqn3ur3s7xvlb2i747nyf24md4zaiatlwgkasynplfjstu.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 
= xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/54/c546inlectt6zvbpgn5qhxi6h2mqgwz227jurnrzfeistnsnjut6.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_3 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 64), (64, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32), (32, 1)) assert_size_stride(primals_7, (64, ), (1, )) 
assert_size_stride(primals_8, (4, 64), (64, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse buf9 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 4096, grid=grid(4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 32), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf2 # reuse buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf8, 2048, grid=grid(2048), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 64), (1, 32), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf4 # reuse buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf7, 4096, grid=grid(4096), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf6) del primals_9 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(buf5, (64, 64), (64, 1), 0), primals_8, buf7, primals_6, buf8, primals_4, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class Model(nn.Module): def __init__(self, input_size, dropout=0.5): super(Model, self).__init__() self.dropout = dropout if self.dropout > 0: self.dropout = nn.Dropout(dropout) self.encode_w1 = nn.Linear(input_size, 64) self.encode_w2 = nn.Linear(64, 32) self.decode_w1 = nn.Linear(32, 64) self.decode_w2 = nn.Linear(64, input_size) def encoder(self, x): x = self.encode_w1(x) x = torch.relu(x) x = self.encode_w2(x) x = torch.relu(x) if self.dropout: x = self.dropout(x) return x def decoder(self, x): x = self.decode_w1(x) x = torch.relu(x) x = self.decode_w2(x) return x def forward(self, x): x = self.encoder(x) x = self.decoder(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
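A quick shape trace for the autoencoder above (illustrative): the bottleneck runs 4 -> 64 -> 32 -> 64 -> 4, which matches the (..., 64) and (..., 32) buffers allocated by the compiled kernels.

import torch

m = Model(input_size=4, dropout=0)   # dropout disabled for a deterministic trace
x = torch.rand(4, 4, 4, 4)
print(m.encoder(x).shape)   # torch.Size([4, 4, 4, 32])
print(m(x).shape)           # torch.Size([4, 4, 4, 4])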
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 64), (64, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32), (32, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (4, 64), (64, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1, primals_2, buf9, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 32), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(2048)](buf3, primals_5, buf8, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 64), (1, 32), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf4 buf7 = empty_strided_cuda((4, 4, 4, 64), 
(1024, 256, 64, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf5, primals_7, buf7, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor( buf3, (64, 32), (32, 1), 0), reinterpret_tensor(buf5, (64, 64), (64, 1), 0), primals_8, buf7, primals_6, buf8, primals_4, buf9 class ModelNew(nn.Module): def __init__(self, input_size, dropout=0.5): super(ModelNew, self).__init__() self.dropout = dropout if self.dropout > 0: self.dropout = nn.Dropout(dropout) self.encode_w1 = nn.Linear(input_size, 64) self.encode_w2 = nn.Linear(64, 32) self.decode_w1 = nn.Linear(32, 64) self.decode_w2 = nn.Linear(64, input_size) def encoder(self, x): x = self.encode_w1(x) x = torch.relu(x) x = self.encode_w2(x) x = torch.relu(x) if self.dropout: x = self.dropout(x) return x def decoder(self, x): x = self.decode_w1(x) x = torch.relu(x) x = self.decode_w2(x) return x def forward(self, input_0): primals_1 = self.encode_w1.weight primals_2 = self.encode_w1.bias primals_4 = self.encode_w2.weight primals_5 = self.encode_w2.bias primals_6 = self.decode_w1.weight primals_7 = self.decode_w1.bias primals_8 = self.decode_w2.weight primals_9 = self.decode_w2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
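A parity sketch for this record (illustrative; assumes CUDA). The generated call() contains no dropout kernel, so eager and compiled outputs agree only with dropout disabled; load_state_dict shares the weights so both modules start identical.

import torch

ref = Model(input_size=4, dropout=0).cuda()
new = ModelNew(input_size=4, dropout=0).cuda()
new.load_state_dict(ref.state_dict())              # identical parameters
x = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(ref(x), new(x), atol=1e-5))   # expected: True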
dohnlee/qufa2021
Model
false
12301
[ "MIT" ]
0
5fb42caee09ec228358e49768e32c75e3c0094ce
https://github.com/dohnlee/qufa2021/tree/5fb42caee09ec228358e49768e32c75e3c0094ce
MaxPoolPad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/jf/cjf7zenaxtvwhbfrvvghsyyrrhxyrlvtj5rotfw7n2nqtvscv3l7.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => constant_pad_nd # x_1 => _low_memory_max_pool2d_with_offsets # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {}) # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {}) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = (-2) + (2*x1) tmp12 = tmp11 >= tmp1 tmp13 = (-2) + (2*x0) tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float("-inf"), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2*x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float("-inf"), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + (2*x0) tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float("-inf"), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2*x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float("-inf"), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float("-inf"), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float("-inf"), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + (2*x1) tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float("-inf"), tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = triton_helpers.maximum(tmp76, tmp66) tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float("-inf"), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 
tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float("-inf"), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + (x4), tmp91, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.init class MaxPoolPad(nn.Module): def __init__(self): super(MaxPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
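A shape walk-through of the module above (illustrative): ZeroPad2d((1, 0, 1, 0)) grows 4x4 to 5x5, MaxPool2d(3, stride=2, padding=1) maps 5x5 to 3x3, and the [:, :, 1:, 1:] slice leaves 2x2. This is why the fused kernel materialises a (4, 4, 3, 3) buffer and returns a (4, 4, 2, 2) view at storage offset 4.

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
y = nn.MaxPool2d(3, stride=2, padding=1)(nn.ZeroPad2d((1, 0, 1, 0))(x))
print(y.shape)                 # torch.Size([4, 4, 3, 3])
print(y[:, :, 1:, 1:].shape)   # torch.Size([4, 4, 2, 2])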
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float('-inf'), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float('-inf'), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float('-inf'), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float('-inf'), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float('-inf'), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float('-inf'), tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, 
tmp75) tmp77 = triton_helpers.maximum(tmp76, tmp66) tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float('-inf'), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float('-inf'), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + x4, tmp91, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0[grid(144)]( arg0_1, buf0, 144, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), class MaxPoolPadNew(nn.Module): def __init__(self): super(MaxPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
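One caveat worth noting (illustrative sketch): call() guards its argument with assert_size_stride, so MaxPoolPadNew accepts only a contiguous (4, 4, 4, 4) CUDA tensor, whereas the eager MaxPoolPad handles any spatial size.

import torch

m = MaxPoolPadNew()
out = m(torch.rand(4, 4, 4, 4, device='cuda'))   # accepted: matches the guard
# m(torch.rand(4, 4, 8, 8, device='cuda'))       # would fail the size/stride guard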
dowhilefalse/DeOldify
MaxPoolPad
false
12302
[ "MIT" ]
0
08f012cdbe36e3f8482460f57e1844b361a7fb16
https://github.com/dowhilefalse/DeOldify/tree/08f012cdbe36e3f8482460f57e1844b361a7fb16
DenseGraphConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3i/c3iibarqokkpdvrjfwijmnifcbse5v3c5vj2oqjzbj6rok54jxf5.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] # Source node to ATen node mapping: # out => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/w2/cw2bwqpq3dkexeyqz25khcvdcedkdcrcwpb7zrtd6eayijd5lgez.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] # Source node to ATen node mapping: # out_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %view_6), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_2 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, out=buf2) del primals_3 buf3 
= empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) del primals_4 buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf4, buf3, primals_5, 256, grid=grid(256), stream=stream0) del buf3 del primals_5 return (buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) class DenseGraphConv(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GraphConv`. """ def __init__(self, in_channels, out_channels, aggr='add', bias=True): assert aggr in ['add', 'mean', 'max'] super(DenseGraphConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.aggr = aggr self.weight = Parameter(torch.Tensor(in_channels, out_channels)) self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) self.lin.reset_parameters() def forward(self, x, adj, mask=None): """ Args: x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times F}`, with batch-size :math:`B`, (maximum) number of nodes :math:`N` for each graph, and feature dimension :math:`F`. adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B \\times N \\times N}`. The adjacency tensor is broadcastable in the batch dimension, resulting in a shared adjacency matrix for the complete batch. mask (BoolTensor, optional): Mask matrix :math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating the valid nodes for each graph. (default: :obj:`None`) """ x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj B, N, _ = adj.size() out = torch.matmul(adj, x) out = torch.matmul(out, self.weight) if self.aggr == 'mean': out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1) elif self.aggr == 'max': out = out.max(dim=-1)[0] out = out + self.lin(x) if mask is not None: out = out * mask.view(B, N, 1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
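A tiny worked example of the layer above (illustrative): with the default aggr='add', the forward pass computes adj @ x @ W + Linear(x), which can be reproduced by hand.

import torch

conv = DenseGraphConv(4, 4)
x, adj = torch.rand(2, 3, 4), torch.rand(3, 3)
manual = adj @ x @ conv.weight + conv.lin(x)
print(torch.allclose(conv(x, adj), manual, atol=1e-6))   # True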
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_2, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0) del buf0 extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, out=buf2) del primals_3 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) del primals_4 buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_1[grid(256)](buf4, buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_5 return buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) class DenseGraphConvNew(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GraphConv`. """ def __init__(self, in_channels, out_channels, aggr='add', bias=True): assert aggr in ['add', 'mean', 'max'] super(DenseGraphConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.aggr = aggr self.weight = Parameter(torch.Tensor(in_channels, out_channels)) self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) self.lin.reset_parameters() def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. 
in_channels, self.out_channels) def forward(self, input_0, input_1): primals_3 = self.weight primals_4 = self.lin.weight primals_5 = self.lin.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
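A parity sketch for this record (illustrative; assumes CUDA). The generated call() realises only the default aggr='add' path (it contains no mean or max reduction kernel), so the check uses default constructor arguments with the weights copied across.

import torch

ref = DenseGraphConv(4, 4).cuda()
new = DenseGraphConvNew(4, 4).cuda()
new.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
adj = torch.rand(4, 4, 4, device='cuda')
print(torch.allclose(ref(x, adj), new(x, adj), atol=1e-5))   # expected: True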
douglasrizzo/pytorch_geometric
DenseGraphConv
false
12303
[ "MIT" ]
0
effc617c6ad6daad506038bb79e4407082e74740
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
SelfAttentionUnit
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ao/caoovxtqrx42gvkmjirowqmmbh6kppvfh5ebrzzv4kzkgwm2umii.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/b4/cb4uraceoow244i5n2enu3fo25p6mrw2jrotiplcnv4fm7kqvxry.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((12*(x0 // 4)) + (48*x1) + (x0 % 4)), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5q/c5qj457ayaq7fys4q7wgcnubkaz3zktadlchvevuczmv2jnhebkd.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone_1 # Graph fragment: # %clone_1 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%squeeze,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (12*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/s4/cs472yivvl3yzse325afzknsz7ua5dqrqzmwls3lwujk3hte6xkl.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zh/czh6tw7ngffcygnivwvcjex5edxy3ms4t27ymyn2hemxlpspxzq7.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/la/claxsme7yqsdzeonqq2iowvclxlentfbkqvzkxqtzqzk4g2snafx.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = 
{memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uh/cuhry756yyxysas37yqxnudohhzwsejznuzhafjqor3jtscltejn.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm, aten.native_layer_norm_backward] # Source node to ATen node mapping: # layer_norm => add_1, add_2, clone_3, mul_1, mul_2, rsqrt, sub_1, var_mean # Graph fragment: # %clone_3 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_9,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_3, [1, 2]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_3, %getitem_1), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_4), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_5), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%rsqrt, 16), kwargs = 
{}) triton_per_fused_native_layer_norm_native_layer_norm_backward_6 = async_compile.triton('triton_per_fused_native_layer_norm_native_layer_norm_backward_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_layer_norm_native_layer_norm_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_layer_norm_native_layer_norm_backward_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = (rindex // 4) x0 = xindex r3 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0) + (16*r2)), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + (16*x0)), xmask, other=0.0) tmp28 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr3 + (r3), None, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 + tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.full([XBLOCK, 1], 16, tl.int32) tmp13 = tmp12.to(tl.float32) tmp14 = tmp11 / tmp13 tmp15 = tmp5 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp4 - tmp14 tmp22 = 16.0 tmp23 = tmp20 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp21 * tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tmp32 = 0.0625 tmp33 = tmp26 * tmp32 tl.store(out_ptr2 + (r3 + (16*x0)), tmp27, xmask) tl.store(out_ptr3 + (r3 + (16*x0)), tmp31, xmask) tl.store(out_ptr4 + (x0), tmp33, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/fe/cfe3dduf3te2shy5wd7vbqgrapmvibq6pqk7lmdy2uwcnlqv25je.py # Topologically Sorted Source Nodes: [res], Original 
ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # res => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_relu_threshold_backward_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 
4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf1, buf3, 192, grid=grid(192), stream=stream0) del buf1 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.bmm] extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 1, 4), (1, 0, 16), 64), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 128), out=buf7) buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf7, buf8, 4, 16, grid=grid(4, 16), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf9) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm, aten.native_layer_norm_backward] triton_per_fused_native_layer_norm_native_layer_norm_backward_6.run(buf9, primals_1, primals_4, primals_5, buf13, buf14, buf15, 4, 16, grid=grid(4), stream=stream0) del primals_1 del primals_5 buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [res], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_7.run(buf9, buf16, 64, grid=grid(64), stream=stream0) del buf9 return (buf14, primals_4, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf13, buf15, buf16, primals_3, reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 128), reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 1), 64), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
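The pair of softmax kernels above (triton_poi_fused__softmax_3/_4) split the numerically stable softmax into two passes: the first subtracts the per-row max and exponentiates, the second normalizes by the row sum. A minimal eager-mode sketch of the same decomposition (reference only; split_softmax is a hypothetical helper, not part of the generated code):

import torch

def split_softmax(scores: torch.Tensor) -> torch.Tensor:
    # Pass 1 (kernel _3): subtract the per-row max before exp for numerical stability.
    numer = (scores - scores.amax(dim=-1, keepdim=True)).exp()
    # Pass 2 (kernel _4): normalize by the per-row sum of the exponentials.
    return numer / numer.sum(dim=-1, keepdim=True)

x = torch.randn(16, 4, 4)
assert torch.allclose(split_softmax(x), torch.softmax(x, dim=-1), atol=1e-6)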
import torch from torch import nn class SelfAttentionUnit(nn.Module): def __init__(self, embed_dim, num_heads, max_len, dropout=0.8, bias= False, skip_connection=True): super(SelfAttentionUnit, self).__init__() self.skip_connection = skip_connection self.attn = nn.MultiheadAttention(embed_dim=embed_dim, num_heads= num_heads, dropout=dropout, bias=bias) self.act = nn.ReLU() self.ln = nn.LayerNorm([max_len, embed_dim]) def forward(self, x): x = x.permute(1, 0, 2) res, _ = self.attn(key=x, value=x, query=x) res = self.act(res) if self.skip_connection: res = res + x res = res.permute(1, 0, 2) return self.ln(res) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embed_dim': 4, 'num_heads': 4, 'max_len': 4}]
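A short usage sketch for the eager module above, assuming the shapes from get_inputs/get_init_inputs; eval() is used so the high attention dropout (0.8) does not randomize the output:

import torch

unit = SelfAttentionUnit(embed_dim=4, num_heads=4, max_len=4).eval()
x = torch.rand(4, 4, 4)  # (batch, seq_len, embed_dim); forward permutes to (seq, batch, embed)
with torch.no_grad():
    out = unit(x)
print(out.shape)  # torch.Size([4, 4, 4])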
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (12 * (x0 // 4) + 48 * x1 + x0 % 4), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 12 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_per_fused_native_layer_norm_native_layer_norm_backward_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = rindex // 4 x0 = xindex r3 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0 + 16 * r2), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 16 * x0), xmask, other=0.0) tmp28 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr3 + r3, None, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 + tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tl.where(xmask, tmp5, 0) tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.full([XBLOCK, 1], 16, tl.int32) tmp13 = tmp12.to(tl.float32) tmp14 = tmp11 / tmp13 tmp15 = tmp5 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp4 - tmp14 tmp22 = 16.0 tmp23 = tmp20 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp21 * tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tmp32 = 0.0625 tmp33 = tmp26 * tmp32 tl.store(out_ptr2 + (r3 + 16 * x0), tmp27, xmask) tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp33, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32) triton_poi_fused_mul_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(192)](buf1, buf3, 192, XBLOCK=256, num_warps=4, 
num_stages=1) del buf1 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 1, 4), (1, 0, 16), 64), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_4[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 128), out=buf7) buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf9) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) triton_per_fused_native_layer_norm_native_layer_norm_backward_6[grid(4) ](buf9, primals_1, primals_4, primals_5, buf13, buf14, buf15, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_5 buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(64)](buf9, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf9 return buf14, primals_4, reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), buf13, buf15, buf16, primals_3, reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 128), reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 1), 64) class SelfAttentionUnitNew(nn.Module): def __init__(self, embed_dim, num_heads, max_len, dropout=0.8, bias= False, skip_connection=True): super(SelfAttentionUnitNew, self).__init__() self.skip_connection = skip_connection self.attn = nn.MultiheadAttention(embed_dim=embed_dim, num_heads= num_heads, dropout=dropout, bias=bias) self.act = nn.ReLU() self.ln = nn.LayerNorm([max_len, embed_dim]) def forward(self, input_0): primals_2 = self.attn.in_proj_weight primals_3 = self.attn.out_proj.weight primals_4 = self.ln.weight primals_5 = self.ln.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
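A hedged sanity check of the compiled wrapper against the eager module above. It assumes a CUDA device, copies weights via load_state_dict so both modules share parameters, and runs in eval() because the Triton graph compiles the inference path (no attention dropout); the tolerances are a guess, not a guarantee:

import torch

eager = SelfAttentionUnit(embed_dim=4, num_heads=4, max_len=4).cuda().eval()
compiled = SelfAttentionUnitNew(embed_dim=4, num_heads=4, max_len=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)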
dohnlee/qufa2021
SelfAttentionUnit
false
12304
[ "MIT" ]
0
5fb42caee09ec228358e49768e32c75e3c0094ce
https://github.com/dohnlee/qufa2021/tree/5fb42caee09ec228358e49768e32c75e3c0094ce
AvgPoolPad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/pr/cprzlfpjjqlj6tudvbc455jxno35xlnta4wgmkbc6uo5zmcxii4s.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d] # Source node to ATen node mapping: # x => constant_pad_nd # x_1 => avg_pool2d # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], False, False), kwargs = {}) triton_poi_fused_avg_pool2d_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = (-2) + (2*x1) tmp12 = tmp11 >= tmp1 tmp13 = (-2) + (2*x0) tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2*x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = tmp29 + tmp19 tmp31 = 1 + (2*x0) tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = tmp40 + tmp30 tmp42 = 2*x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tmp51 + tmp41 tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = tmp58 + tmp52 tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = tmp65 + tmp59 tmp67 = 1 + (2*x1) tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = tmp76 + tmp66 tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = tmp83 + tmp77 tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = tmp90 + tmp84 tmp92 = (((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))) + (((5) * 
((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))*((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))) tmp93 = tmp91 / tmp92 tl.store(out_ptr0 + (x4), tmp93, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_constant_pad_nd_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.init class AvgPoolPad(nn.Module): def __init__(self, stride=2, padding=1): super(AvgPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
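A quick shape check for the eager module above: ZeroPad2d grows the 4x4 input to 5x5, the padded 3x3/stride-2 pool yields 3x3, and the [:, :, 1:, 1:] crop leaves 2x2:

import torch

pool = AvgPoolPad()
x = torch.rand(4, 4, 4, 4)
print(pool(x).shape)  # torch.Size([4, 4, 2, 2])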
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = tmp29 + tmp19 tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = tmp40 + tmp30 tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tmp51 + tmp41 tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = tmp58 + tmp52 tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = tmp65 + tmp59 tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = tmp76 + tmp66 tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, 0.0, 
tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = tmp83 + tmp77 tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = tmp90 + tmp84 tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * ( 0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x1) + ( -1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) tmp93 = tmp91 / tmp92 tl.store(out_ptr0 + x4, tmp93, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), class AvgPoolPadNew(nn.Module): def __init__(self, stride=2, padding=1): super(AvgPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
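A hedged equivalence check of the single fused kernel against the eager pad/pool/crop pipeline; it assumes a CUDA device, and since AvgPoolPad has no parameters no weight copying is needed (tolerances are again a guess):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = AvgPoolPad()(x)
out = AvgPoolPadNew()(x)
torch.testing.assert_close(out, ref, rtol=1e-5, atol=1e-5)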
dowhilefalse/DeOldify
AvgPoolPad
false
12305
[ "MIT" ]
0
08f012cdbe36e3f8482460f57e1844b361a7fb16
https://github.com/dowhilefalse/DeOldify/tree/08f012cdbe36e3f8482460f57e1844b361a7fb16
MultiHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/v6/cv6odvhmmcyvquog4eo62pdliew53orxzwe2wfzampr64jy3ppa7.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add] # Source node to ATen node mapping: # out_1 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_5), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/pr/cpr46kebggqhplwxfjepx3opy5gwceg23zjukixehtkuuugc5juo.py # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose] # Source node to ATen node mapping: # score => bmm # Graph fragment: # %bmm : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%view_22, %view_25), kwargs = {}) # %permute_16 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_22, [0, 2, 1]), kwargs = {}) triton_poi_fused_bmm_transpose_1 = async_compile.triton('triton_poi_fused_bmm_transpose_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_transpose_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_transpose_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2) + (64*((x1 + (4*(x2 % 4))) // 16))), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) tl.store(out_ptr1 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/s3/cs35hxhkkmqsjvinhszu6rfwn3hooazjypcbwiv3xr2ywjn3ewei.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out_6], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp] # Source node to ATen node mapping: # max_1 => max_1 # out_6 => exp # score_1 => div # src_max => clamp_min # sub => sub # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_26, 2.0), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %clamp_min), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused_clamp_div_exp_max_sub_2 = async_compile.triton('triton_poi_fused_clamp_div_exp_max_sub_2', ''' import triton 
import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_exp_max_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rq/crqro5go55q5jp5dqpl4be2mqfi5nsh7elfb6swmltvboawz2isq.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] # Source node to ATen node mapping: # add => add_3 # exp_1 => exp_1 # max_1 => max_1 # score_1 => div # src_max => clamp_min # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_26, 2.0), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add_3 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {}) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/tc/ctcdm2hcp45nesppfpnqkkcad3pvaqhoo4vgpxv6xzpnd4nc6svo.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_7], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] # Source node to ATen node mapping: # add => add_3 # exp_1 => exp_1 # max_1 => max_1 # out_7 => div_1 # score_1 => div # src_max => clamp_min # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_26, 2.0), kwargs = {}) 
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %add_3), kwargs = {}) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf0) del primals_4 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (4, 1), 0), out=buf1) del primals_6 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), out=buf2) del primals_8 buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] triton_poi_fused_add_0.run(buf4, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose] triton_poi_fused_bmm_transpose_1.run(buf3, buf5, buf15, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0); del buf3 # reuse buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose] triton_poi_fused_bmm_transpose_1.run(buf4, buf6, buf16, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm] extern_kernels.bmm(buf5, buf6, out=buf7) buf8 = reinterpret_tensor(buf6, (4, 4, 1, 4, 4), (64, 16, 256, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out_6], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp] triton_poi_fused_clamp_div_exp_max_sub_2.run(buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = empty_strided_cuda((4, 4, 1, 4, 1), (16, 4, 64, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3.run(buf8, buf7, buf9, 64, grid=grid(64), stream=stream0) buf10 = reinterpret_tensor(buf8, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_7], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4.run(buf10, buf9, 256, grid=grid(256), stream=stream0) del buf9 buf11 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.add] triton_poi_fused_add_0.run(buf11, primals_9, 256, grid=grid(256), stream=stream0) del primals_9 buf12 = buf5; del buf5 # reuse buf14 = empty_strided_cuda((16, 4, 4), (16, 1, 4), 
torch.float32) # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.bmm, aten.transpose] triton_poi_fused_bmm_transpose_1.run(buf11, buf12, buf14, 256, grid=grid(256), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), buf12, out=buf13) del buf12 return (reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf7, buf10, buf14, buf15, buf16, reinterpret_tensor(primals_3, (4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
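Read back into math, the three fused softmax kernels above (suffixes _2, _3, _4) jointly implement the restricted_softmax from the module source below, applied to the scaled attention scores. This is a sketch reconstructed from the kernel bodies, where d = 4 is the head size, so the hard-coded constant 0.5 is 1/sqrt(d):

\[
m = \max\Bigl(0,\ \max_j \frac{s_j}{\sqrt{d}}\Bigr), \qquad
a_i = \frac{\exp\bigl(s_i/\sqrt{d} - m\bigr)}{\sum_j \exp\bigl(s_j/\sqrt{d} - m\bigr) + \exp(-m)}
\]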
import math import torch from torch import Tensor from torch.nn import Linear import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class Attention(torch.nn.Module): def __init__(self, dropout=0): super(Attention, self).__init__() self.dropout = dropout def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) assert key.size(-2) == value.size(-2) score = torch.matmul(query, key.transpose(-2, -1)) score = score / math.sqrt(key.size(-1)) score = restricted_softmax(score, dim=-1) score = F.dropout(score, p=self.dropout, training=self.training) return torch.matmul(score, value) def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) class MultiHead(Attention): def __init__(self, in_channels, out_channels, heads=1, groups=1, dropout=0, bias=True): super(MultiHead, self).__init__(dropout) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.groups = groups self.bias = bias assert in_channels % heads == 0 and out_channels % heads == 0 assert in_channels % groups == 0 and out_channels % groups == 0 assert max(groups, self.heads) % min(groups, self.heads) == 0 self.lin_q = Linear(in_channels, out_channels, groups, bias) self.lin_k = Linear(in_channels, out_channels, groups, bias) self.lin_v = Linear(in_channels, out_channels, groups, bias) self.reset_parameters() def reset_parameters(self): self.lin_q.reset_parameters() self.lin_k.reset_parameters() self.lin_v.reset_parameters() def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) == value.size(-1) assert key.size(-2) == value.size(-2) query = self.lin_q(query) key = self.lin_k(key) value = self.lin_v(value) size = list(query.size())[:-2] 
out_channels_per_head = self.out_channels // self.heads query_size = size + [query.size(-2), self.heads, out_channels_per_head] query = query.view(*query_size).transpose(-2, -3) key_size = size + [key.size(-2), self.heads, out_channels_per_head] key = key.view(*key_size).transpose(-2, -3) value_size = size + [value.size(-2), self.heads, out_channels_per_head] value = value.view(*value_size).transpose(-2, -3) out = super(MultiHead, self).forward(query, key, value) out = out.transpose(-3, -2).contiguous() out = out.view(*(size + [query.size(-2), self.out_channels])) return out def __repr__(self): return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format( self.__class__.__name__, self.in_channels, self.out_channels, self.heads, self.groups, self.dropout, self.bias) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
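A quick sanity check on restricted_softmax above: for the default margin=0 it equals an ordinary softmax over the scores with one extra zero logit appended (an implicit "attend to nothing" option), with that logit's column dropped afterwards. A minimal sketch, repeating the definition so it runs standalone:

import torch

def restricted_softmax(src, dim=-1, margin=0):
    # identical to the definition above, repeated for self-containment
    src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
    out = (src - src_max).exp()
    return out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())

src = torch.randn(8, 5)
padded = torch.cat([src, torch.zeros(8, 1)], dim=-1)  # extra zero logit
expected = torch.softmax(padded, dim=-1)[:, :-1]      # drop its column
assert torch.allclose(restricted_softmax(src), expected, atol=1e-6)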
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import Tensor from torch.nn import Linear import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_bmm_transpose_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2 + 64 * ((x1 + 4 * (x2 % 4)) // 16)), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) tl.store(out_ptr1 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * 
tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf0) del primals_4 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (4, 1), 0), out=buf1) del primals_6 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), out=buf2) del primals_8 buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_add_0[grid(256)](buf4, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused_bmm_transpose_1[grid(256)](buf3, buf5, buf15, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0) del buf3 buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_bmm_transpose_1[grid(256)](buf4, buf6, buf16, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(buf5, buf6, out=buf7) buf8 = reinterpret_tensor(buf6, (4, 4, 1, 4, 4), (64, 16, 256, 4, 1), 0 ) del buf6 triton_poi_fused_clamp_div_exp_max_sub_2[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 1, 4, 1), (16, 4, 64, 1, 64), torch.float32) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3[grid(64)](buf8, buf7, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0 ) del buf8 triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4[grid(256)](buf10, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf9 buf11 = 
reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_0[grid(256)](buf11, primals_9, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf12 = buf5 del buf5 buf14 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused_bmm_transpose_1[grid(256)](buf11, buf12, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0) del buf11 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), buf12, out=buf13) del buf12 return reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf7, buf10, buf14, buf15, buf16, reinterpret_tensor(primals_3, (4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. 
__name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class Attention(torch.nn.Module): def __init__(self, dropout=0): super(Attention, self).__init__() self.dropout = dropout def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) assert key.size(-2) == value.size(-2) score = torch.matmul(query, key.transpose(-2, -1)) score = score / math.sqrt(key.size(-1)) score = restricted_softmax(score, dim=-1) score = F.dropout(score, p=self.dropout, training=self.training) return torch.matmul(score, value) def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) class MultiHeadNew(Attention): def __init__(self, in_channels, out_channels, heads=1, groups=1, dropout=0, bias=True): super(MultiHeadNew, self).__init__(dropout) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.groups = groups self.bias = bias assert in_channels % heads == 0 and out_channels % heads == 0 assert in_channels % groups == 0 and out_channels % groups == 0 assert max(groups, self.heads) % min(groups, self.heads) == 0 self.lin_q = Linear(in_channels, out_channels, groups, bias) self.lin_k = Linear(in_channels, out_channels, groups, bias) self.lin_v = Linear(in_channels, out_channels, groups, bias) self.reset_parameters() def reset_parameters(self): self.lin_q.reset_parameters() self.lin_k.reset_parameters() self.lin_v.reset_parameters() def __repr__(self): return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format( self.__class__.__name__, self.in_channels, self.out_channels, self.heads, self.groups, self.dropout, self.bias) def forward(self, input_0, input_1, input_2): primals_4 = self.lin_q.weight primals_5 = self.lin_q.bias primals_6 = self.lin_k.weight primals_7 = self.lin_k.bias primals_8 = self.lin_v.weight primals_9 = self.lin_v.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
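A minimal smoke test for the pair above; this is a sketch, not part of the entry, and it assumes a CUDA device (the generated call() hard-codes cuda:0) plus the (4, 4, 4, 4) shapes from get_inputs(). Weights are copied so both modules share one random init:

import torch

eager = MultiHead(in_channels=4, out_channels=4).cuda()
fused = MultiHeadNew(in_channels=4, out_channels=4).cuda()
fused.load_state_dict(eager.state_dict())  # identical weights and biases

query, key, value = (t.cuda() for t in get_inputs())
with torch.no_grad():
    assert torch.allclose(eager(query, key, value),
                          fused(query, key, value), atol=1e-5)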
douglasrizzo/pytorch_geometric
MultiHead
false
12306
[ "MIT" ]
0
effc617c6ad6daad506038bb79e4407082e74740
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
ResidualLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/e2/ce2g7tigorqocikoyop3snym2gsvx24wihndsskmg2opuqcxo23u.py # Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7n/c7nhgutyhtoc6mh6c4kax3vehfp6poo6edleostzr3iw7c7dh4en.py # Topologically Sorted Source Nodes: [sigmoid_1, mul_1, add], Original ATen: [aten.sigmoid, aten.mul, aten.add] # Source node to ATen node mapping: # add => add_2 # mul_1 => mul_1 # sigmoid_1 => sigmoid_1 # Graph fragment: # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_8,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, %sigmoid_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %mul_1), kwargs = {}) triton_poi_fused_add_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] 
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(buf0, primals_3, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_1, mul_1, add], Original ATen: [aten.sigmoid, aten.mul, aten.add] triton_poi_fused_add_mul_sigmoid_1.run(primals_2, buf2, primals_5, buf3, 256, grid=grid(256), stream=stream0) return (buf3, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
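Reading the two fused kernels above back into math: the first kernel fuses the bias add with swish, and the second additionally fuses the residual add, so the whole entry computes

\[
y \;=\; x + \operatorname{swish}\bigl(W_2\,\operatorname{swish}(W_1 x + b_1) + b_2\bigr),
\qquad \operatorname{swish}(t) = t\,\sigma(t).
\]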
import math import torch from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def swish(x): return x * x.sigmoid() def glorot_orthogonal(tensor, scale): if tensor is not None: torch.nn.init.orthogonal_(tensor.data) scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var() tensor.data *= scale.sqrt() class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class ResidualLayer(torch.nn.Module): def __init__(self, hidden_channels, act=swish): super(ResidualLayer, self).__init__() self.act = act self.lin1 = Linear(hidden_channels, hidden_channels) self.lin2 = Linear(hidden_channels, hidden_channels) self.reset_parameters() def reset_parameters(self): glorot_orthogonal(self.lin1.weight, scale=2.0) self.lin1.bias.data.fill_(0) glorot_orthogonal(self.lin2.weight, scale=2.0) self.lin2.bias.data.fill_(0) def forward(self, x): return x + self.act(self.lin2(self.act(self.lin1(x)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_channels': 4}]
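glorot_orthogonal above rescales an orthogonal init so the elementwise variance lands at scale / (fan_in + fan_out); with scale=2.0 that is the Glorot condition imposed on an orthogonal basis. A hypothetical check, with shapes chosen only for illustration:

import torch

w = torch.empty(1, 64, 64)
glorot_orthogonal(w, scale=2.0)  # as defined above
fan_in, fan_out = w.size(-2), w.size(-1)
target = torch.tensor(2.0 / (fan_in + fan_out))
assert torch.allclose(w.var(), target, rtol=1e-3)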
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_1[grid(256)](primals_2, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def swish(x): return x * x.sigmoid() def glorot_orthogonal(tensor, scale): if tensor is not None: torch.nn.init.orthogonal_(tensor.data) scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var() tensor.data *= scale.sqrt() class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % 
groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class ResidualLayerNew(torch.nn.Module): def __init__(self, hidden_channels, act=swish): super(ResidualLayerNew, self).__init__() self.act = act self.lin1 = Linear(hidden_channels, hidden_channels) self.lin2 = Linear(hidden_channels, hidden_channels) self.reset_parameters() def reset_parameters(self): glorot_orthogonal(self.lin1.weight, scale=2.0) self.lin1.bias.data.fill_(0) glorot_orthogonal(self.lin2.weight, scale=2.0) self.lin2.bias.data.fill_(0) def forward(self, input_0): primals_1 = self.lin1.weight primals_3 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
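The forward above follows the same wrapper pattern as the other compiled modules in this file: parameters and the input are packed into the positional list call() expects, and only output[0] (the result) is returned, the remaining entries being tensors saved for the backward pass. A drop-in usage sketch, assuming a CUDA device:

import torch

layer = ResidualLayerNew(hidden_channels=4).cuda()
x, = (t.cuda() for t in get_inputs())
y = layer(x)               # dispatches through the compiled call() above
assert y.shape == x.shape  # the residual layer preserves the input shape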
douglasrizzo/pytorch_geometric
ResidualLayer
false
12307
[ "MIT" ]
0
effc617c6ad6daad506038bb79e4407082e74740
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
BPRLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/uy/cuyeld5sulrg2l2zfz6tdc3pf2qufuofz6b5m6vsk3dr6ka2ymlm.py # Topologically Sorted Source Nodes: [sub, sigmoid, add, log, mean, loss], Original ATen: [aten.sub, aten.sigmoid, aten.add, aten.log, aten.mean, aten.neg] # Source node to ATen node mapping: # add => add # log => log # loss => neg # mean => mean # sigmoid => sigmoid # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%sub,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, 1e-10), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%log,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) triton_per_fused_add_log_mean_neg_sigmoid_sub_0 = async_compile.triton('triton_per_fused_add_log_mean_neg_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_neg_sigmoid_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_log_mean_neg_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = 1e-10 tmp5 = tmp3 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = -tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, sigmoid, add, log, mean, loss], Original ATen: [aten.sub, aten.sigmoid, aten.add, aten.log, aten.mean, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_add_log_mean_neg_sigmoid_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class BPRLoss(nn.Module): """ BPRLoss, based on Bayesian Personalized Ranking Args: - gamma(float): Small value to avoid division by zero Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = BPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self, gamma=1e-10): super(BPRLoss, self).__init__() self.gamma = gamma def forward(self, pos_score, neg_score): loss = -torch.log(self.gamma + torch.sigmoid(pos_score - neg_score) ).mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
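In formula form, the loss implemented above is

\[
\mathcal{L}_{\mathrm{BPR}} \;=\; -\,\frac{1}{N} \sum_{i=1}^{N}
\log\bigl(\sigma(s_i^{+} - s_i^{-}) + \gamma\bigr),
\qquad \gamma = 10^{-10},
\]

where s^{+} and s^{-} are the positive and negative scores and gamma keeps the log finite when the sigmoid underflows to zero.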
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_neg_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = 1e-10 tmp5 = tmp3 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = -tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_log_mean_neg_sigmoid_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BPRLossNew(nn.Module): """ BPRLoss, based on Bayesian Personalized Ranking Args: - gamma(float): Small value to avoid division by zero Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = BPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self, gamma=1e-10): super(BPRLossNew, self).__init__() self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
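A minimal equivalence sketch, assuming a CUDA device: the persistent-reduction kernel above hard-codes 256 elements, so the inputs must keep the (4, 4, 4, 4) shape from get_inputs() earlier in this entry:

import torch

pos_score, neg_score = (t.cuda() for t in get_inputs())
loss_eager = BPRLoss()(pos_score, neg_score)
loss_fused = BPRLossNew()(pos_score, neg_score)
assert torch.allclose(loss_eager, loss_fused, atol=1e-6)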
dreaming-qin/RecBole
BPRLoss
false
12308
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
ResNetV2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/a2/ca2l7bjxfwrklzvcxfa2hnyzqh3p6neak37vi6fkugdhbu26fbpz.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 768 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (147*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/co/ccosum7u5lx5fx5hf5opofiygxj2ntiq67yo5gfegevmhtkaru4r.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
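# (annotation, not Inductor output) triton_poi_fused_0 through triton_poi_fused_4
# appear to be pure layout kernels: each performs a single load/store that
# repacks a conv weight or the input activation from contiguous NCHW strides
# into the channels-last layout the extern convolution kernels expect, which
# is why no ATen op is attributed to them ("Unsorted Source Nodes: []").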
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/qg/cqg4z653mpzmif22rwtpmv42y4lbkkxhxjqguwoxl3wb6cn5fn7k.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 262144 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ot/cotn5a2cqhwvdw4ugt6b2a4jl2ou2mh37mnmwxgwogdqw4kcufhp.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node 
mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1048576 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = (yindex // 1024) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (1024*x2) + (9216*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bh/cbh2ag2v7orygb3ziuscjd5uid7vytws6fgziabekhv6dedo4sah.py # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add => add # sqrt => sqrt # sub => sub # var_mean => var_mean # w => div # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_5 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_5(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 256 rnumel = 147 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (147*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 147, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask & xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 147.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(out_ptr1 + (r1 + (147*x0)), tmp23, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/qd/cqdfjpoxnsihncv4uccxtrnfsf3ql6wvc6wgwpd2fj5i6yyxupol.py # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # input_2 => add_1, rsqrt, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) triton_red_fused_native_group_norm_6 = async_compile.triton('triton_red_fused_native_group_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( 
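# (annotation) This looped reduction computes the per-(sample, group) mean and
# variance for GroupNorm using Welford's algorithm via
# triton_helpers.welford_reduce / triton_helpers.welford, which is numerically
# stabler than a sum / sum-of-squares pass. xnumel = 128 = 4 samples * 32
# groups and rnumel = 8192 elements per group; the kernel writes out the mean,
# the m2 accumulator, and rsqrt(m2 / 8192 + 1e-06) for the pointwise affine
# kernel that follows.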
size_hints=[128, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = (rindex // 8) tmp0 = tl.load(in_ptr0 + (r2 + (8*x0) + (256*r3) + (262144*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 8192.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pj/cpjraohhskikqkpyyhuz5sq72665tvzjie254ptm3wjt4sw363dw.py # Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # input_2 => add_2, mul_1 # input_3 => relu # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_2,), kwargs = {}) triton_poi_fused_native_group_norm_relu_7 = async_compile.triton('triton_poi_fused_native_group_norm_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x2 = (xindex // 262144) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 8192.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/en/cenibl6xufmpsxvh6eyb2umnz2kxwu7iax4uwtef7x45ck3jiyqm.py # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_4 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
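# (annotation) The kernel below is aten.max_pool2d_with_indices for a 3x3
# window with stride 2 over a channels-last 4x256x32x32 activation, producing
# a 4x256x15x15 output. It issues all nine window loads, folds them with
# pairwise triton_helpers.maximum, and encodes the argmax position as an int8
# in 0..8 so the backward pass can route gradients.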
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = (xindex // 256) % 15 x2 = (xindex // 3840) % 15 x3 = (xindex // 57600) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp3 = tl.load(in_ptr0 + (512 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp5 = tl.load(in_ptr0 + (8192 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp7 = tl.load(in_ptr0 + (8448 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp9 = tl.load(in_ptr0 + (8704 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp11 = tl.load(in_ptr0 + (16384 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp13 = tl.load(in_ptr0 + (16640 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp15 = tl.load(in_ptr0 + (16896 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + (x4), tmp16, xmask) tl.store(out_ptr1 + (x4), tmp41, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7e/c7eyyakbx3wwdxkitvcdctegijacmtfs3f5mwqlotacqy2za6isi.py # Topologically Sorted Source Nodes: [var_mean_1, sub_1, add_1, sqrt_1, w_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_1 => add_3 # sqrt_1 => sqrt_1 # sub_1 => sub_2 # var_mean_1 => var_mean_2 # w_1 => div_1 # Graph fragment: # %var_mean_2 : [num_users=2] = 
call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_5, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_5, %getitem_7), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {}) # %sqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_3,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sqrt_1), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_9 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1024, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_9(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 1024 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/nh/cnhs4vcr657vluxkvtqufylpkhx3e46cwffrst5vsuhqb5bee2hm.py # Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # residual_1 => add_4, rsqrt_1, var_mean_3 # 
Graph fragment: # %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_per_fused_native_group_norm_10 = async_compile.triton('triton_per_fused_native_group_norm_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4096, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4096 rnumel = 225 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r2 = rindex x0 = xindex % 1024 x1 = (xindex // 1024) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (1024*r2) + (230400*x1)), rmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 225, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 225.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + (x3), tmp21, None) tl.store(out_ptr0 + (x3), tmp10, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jp/cjpyntupcqudw3ggo5knfj7ch7ghxyjxqolntzzvjzgk5am7l5vv.py # Topologically Sorted Source Nodes: [var_mean_2, sub_2, add_2, sqrt_2, w_2], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_2 => add_6 # sqrt_2 => sqrt_2 # sub_2 => sub_4 # var_mean_2 => 
var_mean_4 # w_2 => div_2 # Graph fragment: # %var_mean_4 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_8, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_8, %getitem_11), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_10, 1e-05), kwargs = {}) # %sqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_6,), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_4, %sqrt_2), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_11 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 256 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hj/chj6ouuwknkysfru4ls3ofhe7alveqoisdmvuaoevumnq4wt2k6w.py # Topologically Sorted Source Nodes: [group_norm_2], Original ATen: [aten.native_group_norm] 
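# (annotation with a reference sketch; nothing below is used by the generated
# kernels). The recurring var_mean/sub/add/sqrt/div pattern applied to conv
# weights is Weight Standardization, and the native_group_norm kernels
# implement GroupNorm, so each fused block is roughly equivalent to the
# following eager-mode PyTorch. The eps values (1e-05 for weights, 1e-06 for
# the norm) are read off the kernels in this file; the helper itself and its
# name are hypothetical.
def _reference_standardized_conv_gn_relu(x, w, gn_weight, gn_bias, groups=32):
    import torch.nn.functional as F
    # Standardize each filter over its (in_channels, kh, kw) elements,
    # matching the triton_*_add_div_sqrt_sub_var_mean_* kernels.
    var, mean = torch.var_mean(w, dim=[1, 2, 3], correction=0, keepdim=True)
    w = (w - mean) / torch.sqrt(var + 1e-05)
    y = F.conv2d(x, w, padding=w.shape[-1] // 2)
    # The Welford reduction kernels compute exactly these GroupNorm statistics.
    y = F.group_norm(y, groups, gn_weight, gn_bias, eps=1e-06)
    return F.relu(y)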
# Source node to ATen node mapping: # group_norm_2 => add_7, rsqrt_2, var_mean_5 # Graph fragment: # %var_mean_5 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_12, 1e-06), kwargs = {}) # %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_7,), kwargs = {}) triton_red_fused_native_group_norm_12 = async_compile.triton('triton_red_fused_native_group_norm_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 1800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = (rindex // 8) tmp0 = tl.load(in_ptr0 + (r2 + (8*x0) + (256*r3) + (57600*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 1800.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), 
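# (annotation) tmp9 = rsqrt(m2 / 1800 + 1e-06); the mean/m2/rsqrt triple is
# materialized so downstream kernels, and presumably the backward graph, can
# reuse the GroupNorm statistics without re-reducing the activation.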
tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/oj/cojzk2u64jku6zsbuoyuudooypcp7v4wisckmjekrx7w47ide3nn.py # Topologically Sorted Source Nodes: [group_norm_2, y], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_2 => add_8, mul_5 # y => relu_1 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_17), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %unsqueeze_14), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_8,), kwargs = {}) triton_poi_fused_native_group_norm_relu_13 = async_compile.triton('triton_poi_fused_native_group_norm_relu_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 256 x2 = (xindex // 57600) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 8)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 8)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1800.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/au/cauwci26izb7zaj7j2kns6emmf3imcuzuc7mya2u5lekb2a4uxvi.py # Topologically Sorted Source Nodes: [var_mean_3, sub_3, add_3, sqrt_3, w_3], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_3 
=> add_9 # sqrt_3 => sqrt_3 # sub_3 => sub_6 # var_mean_3 => var_mean_6 # w_3 => div_3 # Graph fragment: # %var_mean_6 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_11, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_11, %getitem_15), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_14, 1e-05), kwargs = {}) # %sqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_9,), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_6, %sqrt_3), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_14 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[256, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_14(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 256 rnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2304*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 
2304.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2304*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2304*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/j6/cj6u2qoik6ggh7klfu5csrib7fmrd5bj3x7ohmjq4r3slsaz6opp.py # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # y_2 => add_13, rsqrt_4, var_mean_9 # Graph fragment: # %var_mean_9 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_8, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_20, 1e-06), kwargs = {}) # %rsqrt_4 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_13,), kwargs = {}) triton_red_fused_native_group_norm_15 = async_compile.triton('triton_red_fused_native_group_norm_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 7200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = (rindex // 32) tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (230400*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, 
tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 7200.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/n3/cn3wqwtcg63qqjfryzmmh4maj6kopa4zq2o4fttlkcmvufb32tlf.py # Topologically Sorted Source Nodes: [residual_1, y_2, add_5, y_3], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_5 => add_15 # residual_1 => add_5, mul_3 # y_2 => add_14, mul_9 # y_3 => relu_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_8), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_9, %unsqueeze_29), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %unsqueeze_26), kwargs = {}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_14), kwargs = {}) # %relu_3 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_15,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_16 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_relu_16(in_out_ptr0, 
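# (annotation) Fused merge of what appears to be a projection-shortcut block:
# in_ptr0..in_ptr4 normalize the 1x1-conv downsample branch (per-channel
# stats over 225 elements, eps 1e-05) and in_ptr5..in_ptr9 normalize the main
# branch (32 groups of 7200 elements, eps 1e-06); both are affine-transformed,
# added, and passed through ReLU in one sweep over the 4x1024x15x15 output.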
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK : tl.constexpr): xnumel = 921600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = (xindex // 230400) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x0 + (1024*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (1024*x2)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + (x3), None) tmp15 = tl.load(in_ptr6 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr7 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr8 + (x0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 225.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 7200.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + (x3), tmp30, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/za/cza7xohy5huey54ued5pdquhp4jsmdldpvdhl7u6h6evhljn6ntg.py # Topologically Sorted Source Nodes: [var_mean_5, sub_5, add_6, sqrt_5, w_5], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_6 => add_16 # sqrt_5 => sqrt_5 # sub_5 => sub_10 # var_mean_5 => var_mean_10 # w_5 => div_5 # Graph fragment: # %var_mean_10 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_17, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_17, %getitem_23), kwargs = {}) # %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_22, 1e-05), kwargs = {}) # %sqrt_5 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_16,), kwargs = {}) # %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_10, %sqrt_5), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_17 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
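# (annotation) Weight standardization again (same math as kernels 5, 9, 11),
# compiled as a persistent reduction: one program per output filter
# (xnumel = 256), with each filter's 1024 weights held in registers
# (RBLOCK = 1024, no_x_dim), emitting sqrt(var + 1e-05) and the standardized
# weight in a single pass.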
'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 256 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/qv/cqv2al4zrzdetvahtrxpbboyj4cdfrtikullucdr7ninla2ojfi3.py # Topologically Sorted Source Nodes: [y_6, add_9, y_7], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_9 => add_25 # y_6 => add_24, mul_15 # y_7 => relu_6 # Graph fragment: # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, %unsqueeze_47), kwargs = {}) # %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_15, %unsqueeze_44), kwargs = {}) # %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_3, %add_24), kwargs = {}) # %relu_6 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_25,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_18 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
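# (annotation) The pointwise kernel named below is the identity-shortcut
# variant of triton_poi_fused_add_native_group_norm_relu_16: only the main
# branch is group-normalized (7200-element groups, eps 1e-06) before being
# added to the already-activated residual (relu_3) and passed through ReLU
# again.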
'triton_poi_fused_add_native_group_norm_relu_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_relu_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 921600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = (xindex // 230400) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x3), None) tmp2 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 7200.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (x3), tmp17, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3e/c3e3jkfbcq52r6g6b6cbrryxsycoi3yklyx24onnuazrjrv64isp.py # Topologically Sorted Source Nodes: [var_mean_14, sub_14, add_18, sqrt_14, w_14], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_18 => add_46 # sqrt_14 => sqrt_14 # sub_14 => sub_28 # var_mean_14 => var_mean_28 # w_14 => div_14 # Graph fragment: # %var_mean_28 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_44, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_44, %getitem_59), kwargs = {}) # %add_46 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_58, 1e-05), kwargs = {}) # %sqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_46,), kwargs = {}) # %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_28, %sqrt_14), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_19 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[2048, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 2048 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gq/cgqqbj6ixr6xvohoqin2rmwkifwz3m65ciqtowyt5fhop4xj6y7o.py # Topologically Sorted Source Nodes: [residual_3], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # residual_3 => add_47, rsqrt_14, var_mean_29 # Graph fragment: # %var_mean_29 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_28, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_47 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_60, 1e-05), kwargs = {}) # %rsqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_47,), kwargs = {}) triton_per_fused_native_group_norm_20 = async_compile.triton('triton_per_fused_native_group_norm_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[8192, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
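# Stats pass for the residual-branch group norm: every (sample, group) row of 64
# elements yields its mean (out_ptr0), its sum of squared deviations (out_ptr1), and
# rsqrt(var + 1e-05) (out_ptr2); the pointwise kernel that consumes these buffers
# finishes the normalization as roughly (x - mean) * rsqrt(var + eps) * gamma + beta.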
'kernel_name': 'triton_per_fused_native_group_norm_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 8192 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 2048 x1 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (2048*r2) + (131072*x1)), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 64, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 64.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x3), tmp18, None) tl.store(out_ptr0 + (x3), tmp8, None) tl.store(out_ptr1 + (x3), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jz/cjzjlifmtzexhth3v5uhemdz6lruuvuube75oxni47hhafmbjogo.py # Topologically Sorted Source Nodes: [var_mean_15, sub_15, add_19, sqrt_15, w_15], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_19 => add_49 # sqrt_15 => sqrt_15 # sub_15 => sub_30 # var_mean_15 => var_mean_30 # w_15 => div_15 # Graph fragment: # %var_mean_30 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_47, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_47, %getitem_63), kwargs = {}) # %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_62, 1e-05), kwargs = {}) # %sqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_49,), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_30, %sqrt_15), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_21 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[512, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
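# Same weight-standardization recipe as kernel 17, instantiated here for a 512-filter
# weight tensor (primals_47). Each filter's 1024 elements fit in a single RBLOCK, so
# this compiles to a persistent reduction: one pass computes the mean, the biased
# variance, and the normalized filter without a second trip through global memory.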
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_21', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_21(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 512 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/wr/cwrn5excbg56ob6bzzhwtavbqfqb7pvzdegt7kp4lbgagsgrr2bd.py # Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_15 => add_50, rsqrt_15, var_mean_31 # Graph fragment: # %var_mean_31 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_30, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_50 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_64, 1e-06), kwargs = {}) # %rsqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_50,), kwargs = {}) triton_red_fused_native_group_norm_22 = async_compile.triton('triton_red_fused_native_group_norm_22', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, 
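# Unlike the persistent reductions above, this stats kernel walks its 3600-element
# (sample, group) rows in RBLOCK-sized chunks and merges the partial results with
# Welford's algorithm (welford_reduce / welford below), which keeps the running mean
# and M2 accumulators numerically stable across chunks.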
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_22(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 3600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 16 r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (115200*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 3600.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/d4/cd427eloy2yjw2svol6vip5ycllxrhwgof6m2c6aujoopvoctvdd.py # Topologically Sorted Source Nodes: [group_norm_15, y_16], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_15 => add_51, mul_31 # y_16 => relu_13 # Graph fragment: # %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_31, %unsqueeze_95), kwargs = {}) # %add_51 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_31, %unsqueeze_92), kwargs = {}) # %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_51,), kwargs = {}) triton_poi_fused_native_group_norm_relu_23 = async_compile.triton('triton_poi_fused_native_group_norm_relu_23', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 460800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = (xindex // 115200) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 3600.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ml/cmlxchcvym4okka7o4w3dtz75nwcpens7klv7see6gzrgjru4crn.py # Topologically Sorted Source Nodes: [var_mean_16, sub_16, add_20, sqrt_16, w_16], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_20 => add_52 # sqrt_16 => sqrt_16 # sub_16 => sub_32 # var_mean_16 => var_mean_32 # w_16 => div_16 # Graph fragment: # %var_mean_32 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_50, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_50, %getitem_67), kwargs = {}) # %add_52 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_66, 1e-05), kwargs = {}) # %sqrt_16 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_52,), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_32, %sqrt_16), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_24 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_24', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_24', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_24(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (4608*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 4608.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (4608*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (4608*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ql/cqlfabfzu7z7frymncc5rvxsuisyioetxpzwyizodqojh3qbf3ww.py # Topologically Sorted Source Nodes: [group_norm_16], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_16 => add_53, rsqrt_16, var_mean_33 # Graph fragment: # %var_mean_33 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_32, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_53 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_68, 1e-06), kwargs = {}) # %rsqrt_16 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_53,), kwargs = {}) triton_per_fused_native_group_norm_25 = async_compile.triton('triton_per_fused_native_group_norm_25', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_25', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = (rindex // 16) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (32768*x1)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x4), tmp18, None) tl.store(out_ptr0 + (x4), tmp8, None) tl.store(out_ptr1 + (x4), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/je/cje4qmnqbitm4amlfxfa3mdxapvvr5hptd7oqfvjtooh5sfigskl.py # Topologically Sorted Source Nodes: [group_norm_16, y_17], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_16 => add_54, mul_33 # y_17 => relu_14 # Graph fragment: # %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_33, %unsqueeze_101), kwargs = {}) # %add_54 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_33, %unsqueeze_98), kwargs = {}) # %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_54,), kwargs = {}) triton_poi_fused_native_group_norm_relu_26 = async_compile.triton('triton_poi_fused_native_group_norm_relu_26', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = (xindex // 32768) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pa/cpapwllspqbtgblftrbrapxpxfewknnyrkauwrnhp7qhdxyjnjyq.py # Topologically Sorted Source Nodes: [var_mean_17, sub_17, add_21, sqrt_17, w_17], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_21 => add_55 # sqrt_17 => sqrt_17 # sub_17 => sub_34 # var_mean_17 => var_mean_34 # w_17 => div_17 # Graph fragment: # %var_mean_34 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_53, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_53, %getitem_71), kwargs = {}) # %add_55 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_70, 1e-05), kwargs = {}) # %sqrt_17 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_55,), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_34, %sqrt_17), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_27 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_27', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[2048, 512], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_27', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_27(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 2048 XBLOCK: tl.constexpr = 1 rnumel = 512 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (512*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (512*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mn/cmnjsvtayefkonejpy7zk4q2mcsbvzyofwxkaeldnf5t7ebemkif.py # Topologically Sorted Source Nodes: [y_18], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # y_18 => add_56, rsqrt_17, var_mean_35 # Graph fragment: # %var_mean_35 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_34, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_56 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_72, 1e-06), kwargs = {}) # %rsqrt_17 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_56,), kwargs = {}) triton_red_fused_native_group_norm_28 = async_compile.triton('triton_red_fused_native_group_norm_28', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 64 r3 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (131072*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ji/cjijxahsz2tpalc64o6xljydbultwefrwydw7xdce475v6s6ttfb.py # Topologically Sorted Source Nodes: [residual_3, y_18, add_22, y_19], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_22 => add_58 # residual_3 => add_48, mul_29 # y_18 => add_57, mul_35 # y_19 => relu_15 # Graph fragment: # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_29, %unsqueeze_89), kwargs = {}) # %add_48 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_29, %unsqueeze_86), kwargs = {}) # %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_35, %unsqueeze_107), kwargs = {}) # %add_57 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_35, %unsqueeze_104), kwargs = 
{}) # %add_58 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_48, %add_57), kwargs = {}) # %relu_15 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_58,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_29 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_29', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_29', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_relu_29(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = (xindex // 131072) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x0 + (2048*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (2048*x2)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + (x3), None) tmp15 = tl.load(in_ptr6 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr7 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr8 + (x0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 64.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 4096.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + (x3), tmp30, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/e5/ce5e725kxvl6jl7eeekitaoe56q5reesvisbzhqldhffeu2qrkfc.py # Topologically Sorted Source Nodes: [var_mean_18, sub_18, add_23, sqrt_18, w_18], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_23 => add_59 # sqrt_18 => sqrt_18 # sub_18 => sub_36 # var_mean_18 => var_mean_36 # w_18 => div_18 # Graph fragment: # %var_mean_36 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_56, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_56, %getitem_75), kwargs = {}) # %add_59 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_74, 1e-05), kwargs = {}) # %sqrt_18 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_59,), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_36, %sqrt_18), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_30 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_30', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_30', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, 
tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/k3/ck3bygbjqyc7l2hv4slwiesyb5h7v6uvamlqu24x7fojtgjyysbk.py # Topologically Sorted Source Nodes: [y_22, add_26, y_23], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_26 => add_68 # y_22 => add_67, mul_41 # y_23 => relu_18 # Graph fragment: # %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_41, %unsqueeze_125), kwargs = {}) # %add_67 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_41, %unsqueeze_122), kwargs = {}) # %add_68 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_15, %add_67), kwargs = {}) # %relu_18 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_68,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_31 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_31', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_relu_31(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = (xindex // 131072) 
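# x0 selects the channel (2048 channels, innermost in this apparently channels-last
# layout) and x2 the sample (131072 = 2048 channels * 64 spatial positions); the
# per-group statistics below are indexed by (32*x2) + (x0 // 64), i.e. 32 groups of
# 64 channels per sample.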
tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x3), None) tmp2 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 4096.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (x3), tmp17, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/w7/cw7buxayjszrffyrdccjcbcic36lffdaj7qocb2wpcxz4k46hhci.py # Topologically Sorted Source Nodes: [var_mean_27, sub_27, add_35, sqrt_27, w_27], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_35 => add_89 # sqrt_27 => sqrt_27 # sub_27 => sub_54 # var_mean_27 => var_mean_54 # w_27 => div_27 # Graph fragment: # %var_mean_54 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_83, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_83, %getitem_111), kwargs = {}) # %add_89 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_110, 1e-05), kwargs = {}) # %sqrt_27 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_89,), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_54, %sqrt_27), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_32 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_32', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[4096, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_32', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 4096 rnumel = 2048 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/a5/ca5qmfavufo2qfs3p4ctwkbe7k366d267lmsw7ioa4nlu6dbqihs.py # Topologically Sorted Source Nodes: [residual_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # residual_5 => add_90, rsqrt_27, var_mean_55 # Graph fragment: # %var_mean_55 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_54, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_90 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_112, 1e-05), kwargs = {}) # %rsqrt_27 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_90,), kwargs = {}) triton_per_fused_native_group_norm_33 = async_compile.triton('triton_per_fused_native_group_norm_33', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16384, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_33', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
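# Stats pass for a later residual branch: each (sample, group) row has shrunk to just
# 16 elements (rnumel=16), presumably because the feature map has been downsampled, so
# the whole row is held in registers and tmp18 below is rsqrt(var + 1e-05), exactly as
# in the earlier group-norm stats kernels.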
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16384 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4096 x1 = (xindex // 4096) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (65536*x1)), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 16, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 16.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x3), tmp18, None) tl.store(out_ptr0 + (x3), tmp8, None) tl.store(out_ptr1 + (x3), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/i5/ci5mqnd52qvyrtjersam7awijmxjmjmms53bnijcvka3s3m4j7xb.py # Topologically Sorted Source Nodes: [var_mean_28, sub_28, add_36, sqrt_28, w_28], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_36 => add_92 # sqrt_28 => sqrt_28 # sub_28 => sub_56 # var_mean_28 => var_mean_56 # w_28 => div_28 # Graph fragment: # %var_mean_56 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_86, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_56 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_86, %getitem_115), kwargs = {}) # %add_92 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_114, 1e-05), kwargs = {}) # %sqrt_28 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_92,), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_56, %sqrt_28), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_34 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_34', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1024, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_34', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_34(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1024 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ta/ctaihcjspdz63grwe4pfyyqfgwiuwoinc6dsakq2ggu274bly3er.py # Topologically Sorted Source Nodes: [group_norm_28], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_28 => add_93, rsqrt_28, var_mean_57 # Graph fragment: # %var_mean_57 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_56, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_93 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_116, 1e-06), kwargs = {}) # %rsqrt_28 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_93,), kwargs = {}) triton_red_fused_native_group_norm_35 = async_compile.triton('triton_red_fused_native_group_norm_35', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_35(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = (rindex // 32) tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xi/cxiqxvbsz3ukssdeoyptshvpqvhiwaaf4kbrrdv5nfaasaybrl33.py # Topologically Sorted Source Nodes: [group_norm_28, y_32], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_28 => add_94, mul_57 # y_32 => relu_25 # Graph fragment: # %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_57, %unsqueeze_173), kwargs = {}) # %add_94 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_57, %unsqueeze_170), kwargs = {}) # %relu_25 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_94,), kwargs = {}) triton_poi_fused_native_group_norm_relu_36 = async_compile.triton('triton_poi_fused_native_group_norm_relu_36', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_36', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_36(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = (xindex // 65536) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ie/ciexv24o44oxfqehcf5g5n3uw7wm3rwzhg6j6yp6xehzai7x3ppj.py # Topologically Sorted Source Nodes: [var_mean_29, sub_29, add_37, sqrt_29, w_29], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_37 => add_95 # sqrt_29 => sqrt_29 # sub_29 => sub_58 # var_mean_29 => var_mean_58 # w_29 => div_29 # Graph fragment: # %var_mean_58 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_89, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_58 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_89, %getitem_119), kwargs = {}) # %add_95 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_118, 1e-05), kwargs = {}) # %sqrt_29 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_95,), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_58, %sqrt_29), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_37 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_37', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
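# NOTE (illustrative annotation, not emitted by Inductor): this reduction
# kernel standardizes the (1024, 1024, 3, 3) convolution weight primals_89.
# Each of the 1024 programs (xnumel) owns one output filter and streams its
# 1024*3*3 = 9216 elements (rnumel) through a Welford reduction to obtain the
# mean and the sum of squared deviations, stores sqrt(var + 1e-05) in place,
# then re-reads the filter in a second pass to emit (w - mean) / std. A
# minimal eager-mode sketch of the same weight standardization, assuming the
# correction=0 semantics recorded in the graph fragment above:
#
#   v, m = torch.var_mean(w, dim=(1, 2, 3), keepdim=True, correction=0)
#   w_hat = (w - m) / torch.sqrt(v + 1e-05)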
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1024, 16384], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_37', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_37(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1024 rnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (9216*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 9216.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (9216*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (9216*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/yg/cygphweqnq7pnwvkahvghd3g67zml3uufl6pze324yvsgxttpecs.py # Topologically Sorted Source Nodes: [group_norm_29], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_29 => add_96, rsqrt_29, var_mean_59 # Graph fragment: # %var_mean_59 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_58, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_96 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_120, 1e-06), kwargs = {}) # %rsqrt_29 : [num_users=2] = 
call_function[target=torch.ops.aten.rsqrt.default](args = (%add_96,), kwargs = {}) triton_per_fused_native_group_norm_38 = async_compile.triton('triton_per_fused_native_group_norm_38', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 512], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_38', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_38(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 512 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex % 32 r3 = (rindex // 32) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (16384*x1)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x4), tmp18, None) tl.store(out_ptr0 + (x4), tmp8, None) tl.store(out_ptr1 + (x4), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/l4/cl4rrnjrwmvzdp3wuhtfdwp35f7iqe3swf2ufqxhvpdgyd2ns24f.py # Topologically Sorted Source Nodes: [group_norm_29, y_33], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_29 => add_97, mul_59 # y_33 => relu_26 # Graph fragment: # %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_59, %unsqueeze_179), kwargs = {}) # %add_97 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_59, %unsqueeze_176), kwargs = {}) # %relu_26 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_97,), kwargs = {}) triton_poi_fused_native_group_norm_relu_39 = 
async_compile.triton('triton_poi_fused_native_group_norm_relu_39', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_39', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_39(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = (xindex // 16384) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 512.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5p/c5po2fxloubqkx7bxv5xz6kvdz6v4qojkssixn4dxk7vmlqdmvuc.py # Topologically Sorted Source Nodes: [var_mean_30, sub_30, add_38, sqrt_30, w_30], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_38 => add_98 # sqrt_30 => sqrt_30 # sub_30 => sub_60 # var_mean_30 => var_mean_60 # w_30 => div_30 # Graph fragment: # %var_mean_60 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_92, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_92, %getitem_123), kwargs = {}) # %add_98 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_122, 1e-05), kwargs = {}) # %sqrt_30 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_98,), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_60, 
%sqrt_30), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_40 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_40', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4096, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_40', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_40(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 4096 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/wp/cwpa5qp2ta7tnws6bdr5vxs47sehbs5clrraba3gd4l7tispuaoa.py # Topologically Sorted Source Nodes: [y_34], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # y_34 => add_99, rsqrt_30, var_mean_61 # Graph fragment: # %var_mean_61 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_60, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_99 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_124, 1e-06), kwargs = {}) # %rsqrt_30 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_99,), kwargs = {}) triton_red_fused_native_group_norm_41 = async_compile.triton('triton_red_fused_native_group_norm_41', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 128 r3 = (rindex // 128) tmp0 = tl.load(in_ptr0 + (r2 + (128*x0) + (4096*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zw/czwdegbzheopnvwdl4eqnhwujnatwuxmgcovtppktuchxyyj6edp.py # Topologically Sorted Source Nodes: [residual_5, y_34, add_39, y_35], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_39 => add_101 # residual_5 => add_91, mul_55 # y_34 => add_100, mul_61 # y_35 => relu_27 # Graph fragment: # %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_55, %unsqueeze_167), kwargs = {}) # %add_91 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = 
(%mul_55, %unsqueeze_164), kwargs = {}) # %mul_61 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_61, %unsqueeze_185), kwargs = {}) # %add_100 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_61, %unsqueeze_182), kwargs = {}) # %add_101 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_91, %add_100), kwargs = {}) # %relu_27 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_101,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_42 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_42', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_42', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_relu_42(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = (xindex // 65536) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (4096*x2)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + (x3), None) tmp15 = tl.load(in_ptr6 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr7 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr8 + (x0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 2048.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = 
libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + (x3), tmp30, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/fc/cfccvugrjb5bchp7busa35nnjngrxenithy3hzc3iqtipvpmnaq7.py # Topologically Sorted Source Nodes: [var_mean_31, sub_31, add_40, sqrt_31, w_31], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_40 => add_102 # sqrt_31 => sqrt_31 # sub_31 => sub_62 # var_mean_31 => var_mean_62 # w_31 => div_31 # Graph fragment: # %var_mean_62 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_95, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_62 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_95, %getitem_127), kwargs = {}) # %add_102 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_126, 1e-05), kwargs = {}) # %sqrt_31 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_102,), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_62, %sqrt_31), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_43 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_43', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1024, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_43', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1024 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask & xmask, 
eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (4096*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/wz/cwzmw2mxocwzkaptk7ltuhxbrbaqhh3walrpi3otgnu3l4igiwew.py # Topologically Sorted Source Nodes: [y_38, add_43, y_39], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_43 => add_111 # y_38 => add_110, mul_67 # y_39 => relu_30 # Graph fragment: # %mul_67 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_67, %unsqueeze_203), kwargs = {}) # %add_110 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_67, %unsqueeze_200), kwargs = {}) # %add_111 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_27, %add_110), kwargs = {}) # %relu_30 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_111,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_44 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_44', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_44', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_native_group_norm_relu_44(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = (xindex // 65536) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x3), None) tmp2 = tl.load(in_ptr2 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 2048.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (x3), tmp17, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/qr/cqrbpncc5yhew7raztwazh5qtqmewuygbnx7jaosdzutra7m33et.py # Topologically Sorted Source Nodes: [y_46, add_51, y_47], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_51 => add_131 # y_46 => add_130, mul_79 # y_47 => relu_36 # Graph fragment: # %mul_79 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_79, %unsqueeze_239), kwargs = {}) # %add_130 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_79, %unsqueeze_236), kwargs = {}) # %add_131 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_33, %add_130), kwargs = {}) # %relu_36 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_131,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_45 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_45', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_45', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_native_group_norm_relu_45(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y1 = (yindex // 16) y0 = yindex % 16 tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + ((32*y1) + (x2 // 128)), ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + ((32*y1) + (x2 // 128)), ymask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x2), None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 2048.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1, 1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (y0 + (16*x2) + (65536*y1)), tmp17, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ui/cuic7o4vbk3ddfcahfstl2n4yrl6azuxcyoqipnublq7krxibcnu.py # Topologically Sorted Source Nodes: [], Original ATen: [aten.threshold_backward] # Source node to ATen node mapping: # Graph fragment: # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_36, 0), kwargs = {}) triton_poi_fused_threshold_backward_46 = async_compile.triton('triton_poi_fused_threshold_backward_46', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_threshold_backward_46', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_threshold_backward_46(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4096 y1 = (yindex // 4096) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + (y0 + (4096*x2) + (65536*y1)), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121 = args args.clear() assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (256, ), (1, )) assert_size_stride(primals_4, (256, ), (1, )) assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_6, (1024, ), (1, )) assert_size_stride(primals_7, (1024, ), (1, )) assert_size_stride(primals_8, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_9, (256, ), (1, )) assert_size_stride(primals_10, (256, ), (1, )) assert_size_stride(primals_11, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_12, (256, ), (1, )) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_15, (1024, ), (1, )) assert_size_stride(primals_16, (1024, ), (1, )) assert_size_stride(primals_17, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_18, (256, ), (1, )) assert_size_stride(primals_19, (256, ), (1, )) assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_21, (256, ), (1, )) assert_size_stride(primals_22, (256, ), (1, )) assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_24, (1024, ), (1, )) assert_size_stride(primals_25, (1024, ), (1, )) assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_27, (256, ), (1, )) assert_size_stride(primals_28, (256, ), (1, )) assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_30, (256, ), (1, )) 
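    # NOTE (illustrative annotation, not emitted by Inductor): every
    # assert_size_stride call above and below is a guard that the incoming
    # parameter still has the exact (size, stride) layout the graph was
    # compiled against; a mismatch raises before any kernel can index out of
    # bounds. A rough eager sketch of the check (hypothetical helper, not the
    # real guard implementation):
    #
    #   def _check_layout(t, size, stride):
    #       assert tuple(t.shape) == size and t.stride() == stride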
assert_size_stride(primals_31, (256, ), (1, )) assert_size_stride(primals_32, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_33, (1024, ), (1, )) assert_size_stride(primals_34, (1024, ), (1, )) assert_size_stride(primals_35, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_36, (256, ), (1, )) assert_size_stride(primals_37, (256, ), (1, )) assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_39, (256, ), (1, )) assert_size_stride(primals_40, (256, ), (1, )) assert_size_stride(primals_41, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_42, (1024, ), (1, )) assert_size_stride(primals_43, (1024, ), (1, )) assert_size_stride(primals_44, (2048, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_45, (2048, ), (1, )) assert_size_stride(primals_46, (2048, ), (1, )) assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_48, (512, ), (1, )) assert_size_stride(primals_49, (512, ), (1, )) assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_51, (512, ), (1, )) assert_size_stride(primals_52, (512, ), (1, )) assert_size_stride(primals_53, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_54, (2048, ), (1, )) assert_size_stride(primals_55, (2048, ), (1, )) assert_size_stride(primals_56, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_57, (512, ), (1, )) assert_size_stride(primals_58, (512, ), (1, )) assert_size_stride(primals_59, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_60, (512, ), (1, )) assert_size_stride(primals_61, (512, ), (1, )) assert_size_stride(primals_62, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_63, (2048, ), (1, )) assert_size_stride(primals_64, (2048, ), (1, )) assert_size_stride(primals_65, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_66, (512, ), (1, )) assert_size_stride(primals_67, (512, ), (1, )) assert_size_stride(primals_68, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_69, (512, ), (1, )) assert_size_stride(primals_70, (512, ), (1, )) assert_size_stride(primals_71, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_72, (2048, ), (1, )) assert_size_stride(primals_73, (2048, ), (1, )) assert_size_stride(primals_74, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_75, (512, ), (1, )) assert_size_stride(primals_76, (512, ), (1, )) assert_size_stride(primals_77, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_78, (512, ), (1, )) assert_size_stride(primals_79, (512, ), (1, )) assert_size_stride(primals_80, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_81, (2048, ), (1, )) assert_size_stride(primals_82, (2048, ), (1, )) assert_size_stride(primals_83, (4096, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_84, (4096, ), (1, )) assert_size_stride(primals_85, (4096, ), (1, )) assert_size_stride(primals_86, (1024, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_87, (1024, ), (1, )) assert_size_stride(primals_88, (1024, ), (1, )) assert_size_stride(primals_89, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_90, (1024, ), (1, )) assert_size_stride(primals_91, (1024, ), (1, )) assert_size_stride(primals_92, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_93, (4096, ), (1, )) assert_size_stride(primals_94, (4096, ), (1, )) assert_size_stride(primals_95, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_96, (1024, ), 
(1, )) assert_size_stride(primals_97, (1024, ), (1, )) assert_size_stride(primals_98, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_99, (1024, ), (1, )) assert_size_stride(primals_100, (1024, ), (1, )) assert_size_stride(primals_101, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_102, (4096, ), (1, )) assert_size_stride(primals_103, (4096, ), (1, )) assert_size_stride(primals_104, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_105, (1024, ), (1, )) assert_size_stride(primals_106, (1024, ), (1, )) assert_size_stride(primals_107, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_108, (1024, ), (1, )) assert_size_stride(primals_109, (1024, ), (1, )) assert_size_stride(primals_110, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_111, (4096, ), (1, )) assert_size_stride(primals_112, (4096, ), (1, )) assert_size_stride(primals_113, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_114, (1024, ), (1, )) assert_size_stride(primals_115, (1024, ), (1, )) assert_size_stride(primals_116, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_117, (1024, ), (1, )) assert_size_stride(primals_118, (1024, ), (1, )) assert_size_stride(primals_119, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_120, (4096, ), (1, )) assert_size_stride(primals_121, (4096, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 768, 49, grid=grid(768, 49), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_2, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_11, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_11 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_20, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_20 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_29, buf4, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_29 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_38, buf5, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_38 buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_50, buf6, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_50 buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_59, buf7, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_59 buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_68, buf8, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_68 buf9 = 
empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_77, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_77 buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_89, buf10, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_89 buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_98, buf11, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_98 buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_107, buf12, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_107 buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_116, buf13, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_116 buf15 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf17 = reinterpret_tensor(buf15, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf15 # reuse buf18 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32) # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_5.run(buf17, buf0, buf18, 256, 147, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf1, buf18, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1, 8192, 256)) buf20 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf21 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf23 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_6.run(buf19, buf20, buf21, buf23, 128, 8192, grid=grid(128), stream=stream0) buf24 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256), torch.float32) # Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_7.run(buf19, buf20, buf21, primals_3, primals_4, buf24, 1048576, grid=grid(1048576), stream=stream0) del primals_4 buf25 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) buf26 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.int8) # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_8.run(buf24, buf25, buf26, 230400, grid=grid(230400), stream=stream0) buf28 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf30 = reinterpret_tensor(buf28, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf28 # reuse buf31 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_1, sub_1, add_1, sqrt_1, w_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] 
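        # NOTE (illustrative annotation, not emitted by Inductor): the launch
        # below repeats the pattern used for every convolution in this graph:
        # the raw parameter (here primals_5, the 1024x256x1x1 projection
        # weight) is standardized into a scratch buffer (buf31), and only that
        # standardized copy reaches extern_kernels.convolution. An eager
        # sketch of the combined step (std_conv2d is a hypothetical name):
        #
        #   def std_conv2d(x, w, eps=1e-05, **kw):
        #       v, m = torch.var_mean(w, dim=(1, 2, 3), keepdim=True, correction=0)
        #       return torch.nn.functional.conv2d(x, (w - m) / torch.sqrt(v + eps), **kw)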
triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf30, primals_5, buf31, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf25, buf31, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf33 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) buf34 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) buf36 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) # Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_10.run(buf32, buf33, buf34, buf36, 4096, 225, grid=grid(4096), stream=stream0) buf38 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf40 = reinterpret_tensor(buf38, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf38 # reuse buf41 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_2, sub_2, add_2, sqrt_2, w_2], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf40, primals_8, buf41, 256, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf25, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf43 = buf21; del buf21 # reuse buf44 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf46 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_2], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf42, buf43, buf44, buf46, 128, 1800, grid=grid(128), stream=stream0) buf47 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_2, y], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf42, buf43, buf44, primals_9, primals_10, buf47, 230400, grid=grid(230400), stream=stream0) del primals_10 buf49 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf51 = reinterpret_tensor(buf49, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf49 # reuse buf52 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_3, sub_3, add_3, sqrt_3, w_3], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf51, buf2, buf52, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf53 = extern_kernels.convolution(buf47, buf52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf54 = buf44; del buf44 # reuse buf55 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf57 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_3], Original ATen: [aten.native_group_norm] 
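        # NOTE: group norm is lowered as a two-kernel scheme throughout this graph:
        # a reduction kernel (below) accumulates per-(sample, group) mean/variance
        # into the small (4, 32, 1, 1) stat buffers, and a separate pointwise kernel
        # then normalizes the activations and applies the affine parameters.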
triton_red_fused_native_group_norm_12.run(buf53, buf54, buf55, buf57, 128, 1800, grid=grid(128), stream=stream0) buf58 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_3, y_1], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf53, buf54, buf55, primals_12, primals_13, buf58, 230400, grid=grid(230400), stream=stream0) del primals_13 buf60 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf62 = reinterpret_tensor(buf60, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf60 # reuse buf63 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_4, sub_4, add_4, sqrt_4, w_4], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf62, primals_14, buf63, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf64 = extern_kernels.convolution(buf58, buf63, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf65 = buf55; del buf55 # reuse buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf64, buf65, buf66, buf68, 128, 7200, grid=grid(128), stream=stream0) buf69 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) buf70 = buf69; del buf69 # reuse # Topologically Sorted Source Nodes: [residual_1, y_2, add_5, y_3], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_16.run(buf70, buf32, buf33, buf34, primals_6, primals_7, buf64, buf65, buf66, primals_15, primals_16, 921600, grid=grid(921600), stream=stream0) del primals_16 del primals_7 buf72 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf74 = reinterpret_tensor(buf72, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf72 # reuse buf75 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_5, sub_5, add_6, sqrt_5, w_5], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf74, primals_17, buf75, 256, 1024, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf76 = extern_kernels.convolution(buf70, buf75, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf76, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf77 = buf66; del buf66 # reuse buf78 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf80 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_5], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf76, buf77, buf78, buf80, 128, 1800, grid=grid(128), stream=stream0) buf81 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_5, y_4], Original ATen: [aten.native_group_norm, aten.relu] 
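        # NOTE: the *_native_group_norm_relu_* pointwise kernels fuse the
        # normalization, the affine scale/shift, and the ReLU into a single pass,
        # roughly: y = relu((x - mean) * rsqrt(var + eps) * gamma + beta),
        # so the activation costs no extra read/write of the feature map.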
triton_poi_fused_native_group_norm_relu_13.run(buf76, buf77, buf78, primals_18, primals_19, buf81, 230400, grid=grid(230400), stream=stream0) del primals_19 buf83 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf85 = reinterpret_tensor(buf83, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf83 # reuse buf86 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_6, sub_6, add_7, sqrt_6, w_6], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf85, buf3, buf86, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf87 = extern_kernels.convolution(buf81, buf86, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf87, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf88 = buf78; del buf78 # reuse buf89 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf91 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_6], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf87, buf88, buf89, buf91, 128, 1800, grid=grid(128), stream=stream0) buf92 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_6, y_5], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf87, buf88, buf89, primals_21, primals_22, buf92, 230400, grid=grid(230400), stream=stream0) del primals_22 buf94 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf96 = reinterpret_tensor(buf94, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf94 # reuse buf97 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_7, sub_7, add_8, sqrt_7, w_7], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf96, primals_23, buf97, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf98 = extern_kernels.convolution(buf92, buf97, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf98, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf99 = buf89; del buf89 # reuse buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_6], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf98, buf99, buf100, buf102, 128, 7200, grid=grid(128), stream=stream0) buf103 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) # Topologically Sorted Source Nodes: [y_6, add_9, y_7], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_18.run(buf70, buf98, buf99, buf100, primals_24, primals_25, buf103, 921600, grid=grid(921600), stream=stream0) del primals_25 buf105 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf107 = reinterpret_tensor(buf105, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf105 # reuse buf108 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), 
torch.float32) # Topologically Sorted Source Nodes: [var_mean_8, sub_8, add_10, sqrt_8, w_8], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf107, primals_26, buf108, 256, 1024, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf109 = extern_kernels.convolution(buf103, buf108, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf109, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf110 = buf100; del buf100 # reuse buf111 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_8], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf109, buf110, buf111, buf113, 128, 1800, grid=grid(128), stream=stream0) buf114 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_8, y_8], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf109, buf110, buf111, primals_27, primals_28, buf114, 230400, grid=grid(230400), stream=stream0) del primals_28 buf116 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf118 = reinterpret_tensor(buf116, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf116 # reuse buf119 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_9, sub_9, add_11, sqrt_9, w_9], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf118, buf4, buf119, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf120 = extern_kernels.convolution(buf114, buf119, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf120, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf121 = buf111; del buf111 # reuse buf122 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_9], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf120, buf121, buf122, buf124, 128, 1800, grid=grid(128), stream=stream0) buf125 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_9, y_9], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf120, buf121, buf122, primals_30, primals_31, buf125, 230400, grid=grid(230400), stream=stream0) del primals_31 buf127 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf129 = reinterpret_tensor(buf127, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf127 # reuse buf130 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_10, sub_10, add_12, sqrt_10, w_10], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf129, primals_32, buf130, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] 
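        # NOTE: convolutions themselves are not codegen'd in Triton; each one is
        # dispatched through extern_kernels.convolution (ATen, typically cuDNN on
        # CUDA) with bias=None, since these standardized-conv layers carry no bias
        # and the group-norm affine provides the shift instead.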
buf131 = extern_kernels.convolution(buf125, buf130, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf131, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf132 = buf122; del buf122 # reuse buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_10], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf131, buf132, buf133, buf135, 128, 7200, grid=grid(128), stream=stream0) buf136 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) # Topologically Sorted Source Nodes: [y_10, add_13, y_11], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_18.run(buf103, buf131, buf132, buf133, primals_33, primals_34, buf136, 921600, grid=grid(921600), stream=stream0) del primals_34 buf138 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf140 = reinterpret_tensor(buf138, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf138 # reuse buf141 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_11, sub_11, add_14, sqrt_11, w_11], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf140, primals_35, buf141, 256, 1024, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf142 = extern_kernels.convolution(buf136, buf141, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf142, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf143 = buf133; del buf133 # reuse buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_11], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf142, buf143, buf144, buf146, 128, 1800, grid=grid(128), stream=stream0) buf147 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_11, y_12], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf142, buf143, buf144, primals_36, primals_37, buf147, 230400, grid=grid(230400), stream=stream0) del primals_37 buf149 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf151 = reinterpret_tensor(buf149, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf149 # reuse buf152 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_12, sub_12, add_15, sqrt_12, w_12], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf151, buf5, buf152, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf153 = extern_kernels.convolution(buf147, buf152, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf153, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf154 = buf144; del buf144 # reuse buf155 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) 
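        # NOTE: the `bufN = bufM; del bufM  # reuse` pattern above is inductor's
        # memory planner recycling the small (4, 32, 1, 1) group-norm stat buffers
        # between residual blocks rather than allocating fresh ones, which keeps
        # peak memory flat across the stage.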
buf157 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_12], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf153, buf154, buf155, buf157, 128, 1800, grid=grid(128), stream=stream0) buf158 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_12, y_13], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf153, buf154, buf155, primals_39, primals_40, buf158, 230400, grid=grid(230400), stream=stream0) del primals_40 buf160 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf162 = reinterpret_tensor(buf160, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf160 # reuse buf163 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_13, sub_13, add_16, sqrt_13, w_13], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf162, primals_41, buf163, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf164 = extern_kernels.convolution(buf158, buf163, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf164, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf165 = buf155; del buf155 # reuse buf166 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf168 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_14], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf164, buf165, buf166, buf168, 128, 7200, grid=grid(128), stream=stream0) buf169 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) # Topologically Sorted Source Nodes: [y_14, add_17, y_15], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_18.run(buf136, buf164, buf165, buf166, primals_42, primals_43, buf169, 921600, grid=grid(921600), stream=stream0) del primals_43 buf171 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf173 = reinterpret_tensor(buf171, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf171 # reuse buf174 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_14, sub_14, add_18, sqrt_14, w_14], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_19.run(buf173, primals_44, buf174, 2048, 1024, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [residual_2], Original ATen: [aten.convolution] buf175 = extern_kernels.convolution(buf169, buf174, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf175, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf176 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) buf177 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) buf179 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) # Topologically Sorted Source Nodes: [residual_3], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_20.run(buf175, buf176, buf177, buf179, 8192, 64, 
grid=grid(8192), stream=stream0) buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf181 # reuse buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_15, sub_15, add_19, sqrt_15, w_15], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_21.run(buf183, primals_47, buf184, 512, 1024, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution] buf185 = extern_kernels.convolution(buf169, buf184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf185, (4, 512, 15, 15), (115200, 1, 7680, 512)) buf186 = buf166; del buf166 # reuse buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_22.run(buf185, buf186, buf187, buf189, 128, 3600, grid=grid(128), stream=stream0) buf190 = empty_strided_cuda((4, 512, 15, 15), (115200, 1, 7680, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_15, y_16], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_23.run(buf185, buf186, buf187, primals_48, primals_49, buf190, 460800, grid=grid(460800), stream=stream0) del primals_49 buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf192 # reuse buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_16, sub_16, add_20, sqrt_16, w_16], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf194, buf6, buf195, 512, 4608, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution] buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf197 = buf187; del buf187 # reuse buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_16], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf196, buf197, buf198, buf200, 128, 1024, grid=grid(128), stream=stream0) buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_16, y_17], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf196, buf197, buf198, primals_51, primals_52, buf201, 131072, grid=grid(131072), stream=stream0) del primals_52 buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf203 # reuse buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_17, sub_17, add_21, sqrt_17, 
w_17], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf205, primals_53, buf206, 2048, 512, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution] buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf208 = buf198; del buf198 # reuse buf209 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf211 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_18], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_28.run(buf207, buf208, buf209, buf211, 128, 4096, grid=grid(128), stream=stream0) buf212 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) buf213 = buf212; del buf212 # reuse # Topologically Sorted Source Nodes: [residual_3, y_18, add_22, y_19], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_29.run(buf213, buf175, buf176, buf177, primals_45, primals_46, buf207, buf208, buf209, primals_54, primals_55, 524288, grid=grid(524288), stream=stream0) del buf177 del primals_46 del primals_55 buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf215 # reuse buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_18, sub_18, add_23, sqrt_18, w_18], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf217, primals_56, buf218, 512, 2048, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution] buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf220 = buf209; del buf209 # reuse buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_18], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf219, buf220, buf221, buf223, 128, 1024, grid=grid(128), stream=stream0) buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_18, y_20], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf219, buf220, buf221, primals_57, primals_58, buf224, 131072, grid=grid(131072), stream=stream0) del primals_58 buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf226 # reuse buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_19, sub_19, add_24, sqrt_19, w_19], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf228, buf7, buf229, 512, 4608, grid=grid(512), stream=stream0) # Topologically Sorted Source 
Nodes: [conv2d_19], Original ATen: [aten.convolution] buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf231 = buf221; del buf221 # reuse buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_19], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf230, buf231, buf232, buf234, 128, 1024, grid=grid(128), stream=stream0) buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_19, y_21], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf230, buf231, buf232, primals_60, primals_61, buf235, 131072, grid=grid(131072), stream=stream0) del primals_61 buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf237 # reuse buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_20, sub_20, add_25, sqrt_20, w_20], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf239, primals_62, buf240, 2048, 512, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution] buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf242 = buf232; del buf232 # reuse buf243 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf245 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_22], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_28.run(buf241, buf242, buf243, buf245, 128, 4096, grid=grid(128), stream=stream0) buf246 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) # Topologically Sorted Source Nodes: [y_22, add_26, y_23], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_31.run(buf213, buf241, buf242, buf243, primals_63, primals_64, buf246, 524288, grid=grid(524288), stream=stream0) del primals_64 buf248 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf250 = reinterpret_tensor(buf248, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf248 # reuse buf251 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_21, sub_21, add_27, sqrt_21, w_21], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf250, primals_65, buf251, 512, 2048, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution] buf252 = extern_kernels.convolution(buf246, buf251, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf252, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf253 = buf243; del buf243 # reuse buf254 = 
empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf256 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_21], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf252, buf253, buf254, buf256, 128, 1024, grid=grid(128), stream=stream0) buf257 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_21, y_24], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf252, buf253, buf254, primals_66, primals_67, buf257, 131072, grid=grid(131072), stream=stream0) del primals_67 buf259 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf261 = reinterpret_tensor(buf259, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf259 # reuse buf262 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_22, sub_22, add_28, sqrt_22, w_22], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf261, buf8, buf262, 512, 4608, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution] buf263 = extern_kernels.convolution(buf257, buf262, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf263, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf264 = buf254; del buf254 # reuse buf265 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf267 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_22], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf263, buf264, buf265, buf267, 128, 1024, grid=grid(128), stream=stream0) buf268 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_22, y_25], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf263, buf264, buf265, primals_69, primals_70, buf268, 131072, grid=grid(131072), stream=stream0) del primals_70 buf270 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf272 = reinterpret_tensor(buf270, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf270 # reuse buf273 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_23, sub_23, add_29, sqrt_23, w_23], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf272, primals_71, buf273, 2048, 512, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_23], Original ATen: [aten.convolution] buf274 = extern_kernels.convolution(buf268, buf273, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf274, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf275 = buf265; del buf265 # reuse buf276 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_26], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_28.run(buf274, buf275, buf276, buf278, 128, 4096, grid=grid(128), stream=stream0) buf279 = 
empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) # Topologically Sorted Source Nodes: [y_26, add_30, y_27], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_31.run(buf246, buf274, buf275, buf276, primals_72, primals_73, buf279, 524288, grid=grid(524288), stream=stream0) del primals_73 buf281 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf283 = reinterpret_tensor(buf281, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf281 # reuse buf284 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_24, sub_24, add_31, sqrt_24, w_24], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf283, primals_74, buf284, 512, 2048, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_24], Original ATen: [aten.convolution] buf285 = extern_kernels.convolution(buf279, buf284, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf285, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf286 = buf276; del buf276 # reuse buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_24], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf285, buf286, buf287, buf289, 128, 1024, grid=grid(128), stream=stream0) buf290 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_24, y_28], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf285, buf286, buf287, primals_75, primals_76, buf290, 131072, grid=grid(131072), stream=stream0) del primals_76 buf292 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf294 = reinterpret_tensor(buf292, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf292 # reuse buf295 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_25, sub_25, add_32, sqrt_25, w_25], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf294, buf9, buf295, 512, 4608, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_25], Original ATen: [aten.convolution] buf296 = extern_kernels.convolution(buf290, buf295, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf296, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf297 = buf287; del buf287 # reuse buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_25], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf296, buf297, buf298, buf300, 128, 1024, grid=grid(128), stream=stream0) buf301 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_25, y_29], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf296, buf297, buf298, primals_78, primals_79, buf301, 131072, grid=grid(131072), stream=stream0) del primals_79 
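        # NOTE: the last bottleneck of the 2048-channel stage completes below, and
        # then a stride-2 standardized 1x1 projection ("residual_4", 2048 -> 4096
        # channels, 8x8 -> 4x4) opens the final stage; this is the same
        # projection-shortcut pattern used at every resolution change above.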
buf303 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf305 = reinterpret_tensor(buf303, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf303 # reuse buf306 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_26, sub_26, add_33, sqrt_26, w_26], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf305, primals_80, buf306, 2048, 512, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_26], Original ATen: [aten.convolution] buf307 = extern_kernels.convolution(buf301, buf306, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf307, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf308 = buf298; del buf298 # reuse buf309 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf311 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_30], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_28.run(buf307, buf308, buf309, buf311, 128, 4096, grid=grid(128), stream=stream0) buf312 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) # Topologically Sorted Source Nodes: [y_30, add_34, y_31], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_31.run(buf279, buf307, buf308, buf309, primals_81, primals_82, buf312, 524288, grid=grid(524288), stream=stream0) del primals_82 buf314 = reinterpret_tensor(buf34, (4096, 1, 1, 1), (1, 4096, 4096, 4096), 0); del buf34 # reuse buf316 = reinterpret_tensor(buf314, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf314 # reuse buf317 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_27, sub_27, add_35, sqrt_27, w_27], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_32.run(buf316, primals_83, buf317, 4096, 2048, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [residual_4], Original ATen: [aten.convolution] buf318 = extern_kernels.convolution(buf312, buf317, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf318, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf319 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384), torch.float32) buf320 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384), torch.float32) buf322 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384), torch.float32) # Topologically Sorted Source Nodes: [residual_5], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_33.run(buf318, buf319, buf320, buf322, 16384, 16, grid=grid(16384), stream=stream0) buf324 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf326 = reinterpret_tensor(buf324, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf324 # reuse buf327 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_28, sub_28, add_36, sqrt_28, w_28], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_34.run(buf326, primals_86, buf327, 1024, 2048, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_28], Original ATen: 
[aten.convolution] buf328 = extern_kernels.convolution(buf312, buf327, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf328, (4, 1024, 8, 8), (65536, 1, 8192, 1024)) buf329 = buf309; del buf309 # reuse buf330 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf332 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_28], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_35.run(buf328, buf329, buf330, buf332, 128, 2048, grid=grid(128), stream=stream0) buf333 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_28, y_32], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_36.run(buf328, buf329, buf330, primals_87, primals_88, buf333, 262144, grid=grid(262144), stream=stream0) del primals_88 buf335 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf337 = reinterpret_tensor(buf335, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf335 # reuse buf338 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_29, sub_29, add_37, sqrt_29, w_29], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf337, buf10, buf338, 1024, 9216, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_29], Original ATen: [aten.convolution] buf339 = extern_kernels.convolution(buf333, buf338, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf339, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf340 = buf330; del buf330 # reuse buf341 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf343 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_29], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf339, buf340, buf341, buf343, 128, 512, grid=grid(128), stream=stream0) buf344 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_29, y_33], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf339, buf340, buf341, primals_90, primals_91, buf344, 65536, grid=grid(65536), stream=stream0) del primals_91 buf346 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf348 = reinterpret_tensor(buf346, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf346 # reuse buf349 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_30, sub_30, add_38, sqrt_30, w_30], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf348, primals_92, buf349, 4096, 1024, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_30], Original ATen: [aten.convolution] buf350 = extern_kernels.convolution(buf344, buf349, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf350, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf351 = buf341; del buf341 # reuse buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch.float32) buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_34], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_41.run(buf350, buf351, buf352, buf354, 128, 2048, grid=grid(128), stream=stream0) buf355 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32) buf356 = buf355; del buf355 # reuse # Topologically Sorted Source Nodes: [residual_5, y_34, add_39, y_35], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_42.run(buf356, buf318, buf319, buf320, primals_84, primals_85, buf350, buf351, buf352, primals_93, primals_94, 262144, grid=grid(262144), stream=stream0) del buf320 del primals_85 del primals_94 buf358 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf360 = reinterpret_tensor(buf358, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf358 # reuse buf361 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) # Topologically Sorted Source Nodes: [var_mean_31, sub_31, add_40, sqrt_31, w_31], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf360, primals_95, buf361, 1024, 4096, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_31], Original ATen: [aten.convolution] buf362 = extern_kernels.convolution(buf356, buf361, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf362, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf363 = buf352; del buf352 # reuse buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_31], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf362, buf363, buf364, buf366, 128, 512, grid=grid(128), stream=stream0) buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_31, y_36], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf362, buf363, buf364, primals_96, primals_97, buf367, 65536, grid=grid(65536), stream=stream0) del primals_97 buf369 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf371 = reinterpret_tensor(buf369, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf369 # reuse buf372 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_32, sub_32, add_41, sqrt_32, w_32], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf371, buf11, buf372, 1024, 9216, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_32], Original ATen: [aten.convolution] buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf373, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf374 = buf364; del buf364 # reuse buf375 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf377 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_32], Original ATen: [aten.native_group_norm] 
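        # NOTE (an inference from inductor's kernel-naming scheme, not stated in
        # this file): triton_per_fused_* kernels are persistent reductions whose
        # whole reduction extent fits one block (512 elements per group at 4x4
        # spatial here), while triton_red_fused_* kernels loop over larger extents.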
triton_per_fused_native_group_norm_38.run(buf373, buf374, buf375, buf377, 128, 512, grid=grid(128), stream=stream0) buf378 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_32, y_37], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf373, buf374, buf375, primals_99, primals_100, buf378, 65536, grid=grid(65536), stream=stream0) del primals_100 buf380 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf382 = reinterpret_tensor(buf380, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf380 # reuse buf383 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_33, sub_33, add_42, sqrt_33, w_33], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf382, primals_101, buf383, 4096, 1024, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_33], Original ATen: [aten.convolution] buf384 = extern_kernels.convolution(buf378, buf383, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf384, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf385 = buf375; del buf375 # reuse buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_38], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_41.run(buf384, buf385, buf386, buf388, 128, 2048, grid=grid(128), stream=stream0) buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32) # Topologically Sorted Source Nodes: [y_38, add_43, y_39], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_44.run(buf356, buf384, buf385, buf386, primals_102, primals_103, buf389, 262144, grid=grid(262144), stream=stream0) del primals_103 buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf391 # reuse buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) # Topologically Sorted Source Nodes: [var_mean_34, sub_34, add_44, sqrt_34, w_34], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf393, primals_104, buf394, 1024, 4096, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_34], Original ATen: [aten.convolution] buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf396 = buf386; del buf386 # reuse buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_34], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf395, buf396, buf397, buf399, 128, 512, grid=grid(128), stream=stream0) buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_34, y_40], Original ATen: [aten.native_group_norm, aten.relu] 
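        # NOTE: at each residual join (the triton_poi_fused_add_native_group_norm_relu_*
        # launches above), the shortcut add is folded into the same pass as the
        # normalization and ReLU of the block output, so the skip connection costs
        # no additional round trip through global memory.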
triton_poi_fused_native_group_norm_relu_39.run(buf395, buf396, buf397, primals_105, primals_106, buf400, 65536, grid=grid(65536), stream=stream0) del primals_106 buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf402 # reuse buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_35, sub_35, add_45, sqrt_35, w_35], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf404, buf12, buf405, 1024, 9216, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_35], Original ATen: [aten.convolution] buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf407 = buf397; del buf397 # reuse buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_35], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf406, buf407, buf408, buf410, 128, 512, grid=grid(128), stream=stream0) buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_35, y_41], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf406, buf407, buf408, primals_108, primals_109, buf411, 65536, grid=grid(65536), stream=stream0) del primals_109 buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf413 # reuse buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_36, sub_36, add_46, sqrt_36, w_36], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf415, primals_110, buf416, 4096, 1024, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_36], Original ATen: [aten.convolution] buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf418 = buf408; del buf408 # reuse buf419 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf421 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_42], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_41.run(buf417, buf418, buf419, buf421, 128, 2048, grid=grid(128), stream=stream0) buf422 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32) # Topologically Sorted Source Nodes: [y_42, add_47, y_43], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_44.run(buf389, buf417, buf418, buf419, primals_111, primals_112, buf422, 262144, grid=grid(262144), stream=stream0) del primals_112 buf424 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf426 = reinterpret_tensor(buf424, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del 
buf424 # reuse buf427 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) # Topologically Sorted Source Nodes: [var_mean_37, sub_37, add_48, sqrt_37, w_37], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf426, primals_113, buf427, 1024, 4096, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_37], Original ATen: [aten.convolution] buf428 = extern_kernels.convolution(buf422, buf427, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf428, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf429 = buf419; del buf419 # reuse buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_37], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf428, buf429, buf430, buf432, 128, 512, grid=grid(128), stream=stream0) buf433 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_37, y_44], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf428, buf429, buf430, primals_114, primals_115, buf433, 65536, grid=grid(65536), stream=stream0) del primals_115 buf435 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf437 = reinterpret_tensor(buf435, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf435 # reuse buf438 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_38, sub_38, add_49, sqrt_38, w_38], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf437, buf13, buf438, 1024, 9216, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_38], Original ATen: [aten.convolution] buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf439, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf440 = buf430; del buf430 # reuse buf441 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf443 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_38], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf439, buf440, buf441, buf443, 128, 512, grid=grid(128), stream=stream0) buf444 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_38, y_45], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf439, buf440, buf441, primals_117, primals_118, buf444, 65536, grid=grid(65536), stream=stream0) del primals_118 buf446 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf448 = reinterpret_tensor(buf446, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf446 # reuse buf449 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_39, sub_39, add_50, sqrt_39, w_39], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf448, primals_119, buf449, 4096, 
1024, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_39], Original ATen: [aten.convolution] buf450 = extern_kernels.convolution(buf444, buf449, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf450, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf451 = buf441; del buf441 # reuse buf452 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_46], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_41.run(buf450, buf451, buf452, buf454, 128, 2048, grid=grid(128), stream=stream0) buf455 = empty_strided_cuda((4, 4096, 4, 4), (65536, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_46, add_51, y_47], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_45.run(buf422, buf450, buf451, buf452, primals_120, primals_121, buf455, 64, 4096, grid=grid(64, 4096), stream=stream0) del buf452 del primals_121 buf456 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.bool) # Topologically Sorted Source Nodes: [], Original ATen: [aten.threshold_backward] triton_poi_fused_threshold_backward_46.run(buf455, buf456, 16384, 16, grid=grid(16384, 16), stream=stream0) return (buf455, buf0, buf1, primals_3, primals_5, primals_6, primals_8, primals_9, buf2, primals_12, primals_14, primals_15, primals_17, primals_18, buf3, primals_21, primals_23, primals_24, primals_26, primals_27, buf4, primals_30, primals_32, primals_33, primals_35, primals_36, buf5, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, buf6, primals_51, primals_53, primals_54, primals_56, primals_57, buf7, primals_60, primals_62, primals_63, primals_65, primals_66, buf8, primals_69, primals_71, primals_72, primals_74, primals_75, buf9, primals_78, primals_80, primals_81, primals_83, primals_84, primals_86, primals_87, buf10, primals_90, primals_92, primals_93, primals_95, primals_96, buf11, primals_99, primals_101, primals_102, primals_104, primals_105, buf12, primals_108, primals_110, primals_111, primals_113, primals_114, buf13, primals_117, primals_119, primals_120, buf17, buf18, buf19, reinterpret_tensor(buf20, (4, 32), (32, 1), 0), reinterpret_tensor(buf23, (4, 32), (32, 1), 0), buf24, buf25, buf26, buf30, buf31, buf32, reinterpret_tensor(buf33, (4, 1024), (1024, 1), 0), reinterpret_tensor(buf36, (4, 1024), (1024, 1), 0), buf40, buf41, buf42, reinterpret_tensor(buf43, (4, 32), (32, 1), 0), reinterpret_tensor(buf46, (4, 32), (32, 1), 0), buf47, buf51, buf52, buf53, reinterpret_tensor(buf54, (4, 32), (32, 1), 0), reinterpret_tensor(buf57, (4, 32), (32, 1), 0), buf58, buf62, buf63, buf64, reinterpret_tensor(buf65, (4, 32), (32, 1), 0), reinterpret_tensor(buf68, (4, 32), (32, 1), 0), buf70, buf74, buf75, buf76, reinterpret_tensor(buf77, (4, 32), (32, 1), 0), reinterpret_tensor(buf80, (4, 32), (32, 1), 0), buf81, buf85, buf86, buf87, reinterpret_tensor(buf88, (4, 32), (32, 1), 0), reinterpret_tensor(buf91, (4, 32), (32, 1), 0), buf92, buf96, buf97, buf98, reinterpret_tensor(buf99, (4, 32), (32, 1), 0), reinterpret_tensor(buf102, (4, 32), (32, 1), 0), buf103, buf107, buf108, buf109, reinterpret_tensor(buf110, (4, 32), (32, 1), 0), reinterpret_tensor(buf113, (4, 32), (32, 1), 0), buf114, buf118, buf119, buf120, reinterpret_tensor(buf121, (4, 32), (32, 1), 0), 
reinterpret_tensor(buf124, (4, 32), (32, 1), 0), buf125, buf129, buf130, buf131, reinterpret_tensor(buf132, (4, 32), (32, 1), 0), reinterpret_tensor(buf135, (4, 32), (32, 1), 0), buf136, buf140, buf141, buf142, reinterpret_tensor(buf143, (4, 32), (32, 1), 0), reinterpret_tensor(buf146, (4, 32), (32, 1), 0), buf147, buf151, buf152, buf153, reinterpret_tensor(buf154, (4, 32), (32, 1), 0), reinterpret_tensor(buf157, (4, 32), (32, 1), 0), buf158, buf162, buf163, buf164, reinterpret_tensor(buf165, (4, 32), (32, 1), 0), reinterpret_tensor(buf168, (4, 32), (32, 1), 0), buf169, buf173, buf174, buf175, reinterpret_tensor(buf176, (4, 2048), (2048, 1), 0), reinterpret_tensor(buf179, (4, 2048), (2048, 1), 0), buf183, buf184, buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0), reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194, buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0), reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205, buf206, buf207, reinterpret_tensor(buf208, (4, 32), (32, 1), 0), reinterpret_tensor(buf211, (4, 32), (32, 1), 0), buf213, buf217, buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228, buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0), reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239, buf240, buf241, reinterpret_tensor(buf242, (4, 32), (32, 1), 0), reinterpret_tensor(buf245, (4, 32), (32, 1), 0), buf246, buf250, buf251, buf252, reinterpret_tensor(buf253, (4, 32), (32, 1), 0), reinterpret_tensor(buf256, (4, 32), (32, 1), 0), buf257, buf261, buf262, buf263, reinterpret_tensor(buf264, (4, 32), (32, 1), 0), reinterpret_tensor(buf267, (4, 32), (32, 1), 0), buf268, buf272, buf273, buf274, reinterpret_tensor(buf275, (4, 32), (32, 1), 0), reinterpret_tensor(buf278, (4, 32), (32, 1), 0), buf279, buf283, buf284, buf285, reinterpret_tensor(buf286, (4, 32), (32, 1), 0), reinterpret_tensor(buf289, (4, 32), (32, 1), 0), buf290, buf294, buf295, buf296, reinterpret_tensor(buf297, (4, 32), (32, 1), 0), reinterpret_tensor(buf300, (4, 32), (32, 1), 0), buf301, buf305, buf306, buf307, reinterpret_tensor(buf308, (4, 32), (32, 1), 0), reinterpret_tensor(buf311, (4, 32), (32, 1), 0), buf312, buf316, buf317, buf318, reinterpret_tensor(buf319, (4, 4096), (4096, 1), 0), reinterpret_tensor(buf322, (4, 4096), (4096, 1), 0), buf326, buf327, buf328, reinterpret_tensor(buf329, (4, 32), (32, 1), 0), reinterpret_tensor(buf332, (4, 32), (32, 1), 0), buf333, buf337, buf338, buf339, reinterpret_tensor(buf340, (4, 32), (32, 1), 0), reinterpret_tensor(buf343, (4, 32), (32, 1), 0), buf344, buf348, buf349, buf350, reinterpret_tensor(buf351, (4, 32), (32, 1), 0), reinterpret_tensor(buf354, (4, 32), (32, 1), 0), buf356, buf360, buf361, buf362, reinterpret_tensor(buf363, (4, 32), (32, 1), 0), reinterpret_tensor(buf366, (4, 32), (32, 1), 0), buf367, buf371, buf372, buf373, reinterpret_tensor(buf374, (4, 32), (32, 1), 0), reinterpret_tensor(buf377, (4, 32), (32, 1), 0), buf378, buf382, buf383, buf384, reinterpret_tensor(buf385, (4, 32), (32, 1), 0), reinterpret_tensor(buf388, (4, 32), (32, 1), 0), buf389, buf393, buf394, buf395, reinterpret_tensor(buf396, (4, 32), (32, 1), 0), reinterpret_tensor(buf399, (4, 32), (32, 1), 0), buf400, buf404, buf405, buf406, reinterpret_tensor(buf407, (4, 32), (32, 1), 0), reinterpret_tensor(buf410, (4, 32), (32, 1), 0), buf411, buf415, buf416, buf417, reinterpret_tensor(buf418, (4, 32), (32, 1), 0), reinterpret_tensor(buf421, (4, 32), (32, 1), 
0), buf422, buf426, buf427, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0), reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437, buf438, buf439, reinterpret_tensor(buf440, (4, 32), (32, 1), 0), reinterpret_tensor(buf443, (4, 32), (32, 1), 0), buf444, buf448, buf449, buf450, reinterpret_tensor(buf451, (4, 32), (32, 1), 0), reinterpret_tensor(buf454, (4, 32), (32, 1), 0), buf456, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((256, 1024, 1, 1), 
(1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((2048, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((512, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_48 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_49 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_50 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_51 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_52 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_53 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_54 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_55 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_56 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_57 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_58 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_59 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_60 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_61 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_62 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_63 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_64 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_65 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_66 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_67 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_68 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_69 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_70 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_71 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_72 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_73 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_74 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_75 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_76 = rand_strided((512, ), 
(1, ), device='cuda:0', dtype=torch.float32) primals_77 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_78 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_79 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_80 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_81 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_82 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_83 = rand_strided((4096, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_84 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_85 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_86 = rand_strided((1024, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_87 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_88 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_89 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_90 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_91 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_92 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_93 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_94 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_95 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_96 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_97 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_98 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_99 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_100 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_101 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_102 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_103 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_104 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_105 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_106 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_107 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_108 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_109 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_110 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_111 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_112 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_113 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_114 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_115 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_116 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_117 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_118 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_119 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_120 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_121 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
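# The recurring [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# fusions above (triton_red_fused_add_div_sqrt_sub_var_mean_37,
# triton_per_fused_add_div_sqrt_sub_var_mean_40, ...) all materialize the
# standardized weight that StdConv2d (defined below) computes on the fly:
# per output filter, subtract the mean and divide by sqrt(var + 1e-05)
# over the (cin, kh, kw) axes. A minimal eager-mode sketch of that
# reduction, usable to sanity-check a standardized buffer such as buf405
# against its source weight; the helper name and shapes are illustrative,
# not part of the generated code.

import torch


def standardize_weight(w: torch.Tensor, eps: float = 1e-05) -> torch.Tensor:
    # Matches torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    # followed by (w - mean) / sqrt(var + eps), as in the fused kernels.
    v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    return (w - m) / torch.sqrt(v + eps)


w = torch.randn(1024, 1024, 3, 3)  # same shape as the 3x3 weights above
ws = standardize_weight(w)
# Each standardized filter has (near-)zero mean and roughly unit variance.
assert torch.allclose(ws.mean(dim=[1, 2, 3]), torch.zeros(1024), atol=1e-4)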
import torch
import torch.nn as nn
from collections import OrderedDict
from os.path import join as pjoin  # used by PreActBottleneck.load_from
import torch.nn.functional as F


def conv1x1(cin, cout, stride=1, bias=False):
    return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
        bias=bias)


def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
        bias=bias, groups=groups)


def np2th(weights, conv=False):
    """Possibly convert HWIO to OIHW."""
    if conv:
        weights = weights.transpose([3, 2, 0, 1])
    return torch.from_numpy(weights)


class StdConv2d(nn.Conv2d):

    def forward(self, x):
        # Weight standardization: zero mean, unit variance per output filter.
        w = self.weight
        v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
        w = (w - m) / torch.sqrt(v + 1e-05)
        return F.conv2d(x, w, self.bias, self.stride, self.padding,
            self.dilation, self.groups)


class PreActBottleneck(nn.Module):
    """Pre-activation (v2) bottleneck block."""

    def __init__(self, cin, cout=None, cmid=None, stride=1):
        super().__init__()
        cout = cout or cin
        cmid = cmid or cout // 4
        self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06)
        self.conv1 = conv1x1(cin, cmid, bias=False)
        self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06)
        self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
        self.gn3 = nn.GroupNorm(32, cout, eps=1e-06)
        self.conv3 = conv1x1(cmid, cout, bias=False)
        self.relu = nn.ReLU(inplace=True)
        if stride != 1 or cin != cout:
            self.downsample = conv1x1(cin, cout, stride, bias=False)
            self.gn_proj = nn.GroupNorm(cout, cout)

    def forward(self, x):
        residual = x
        if hasattr(self, 'downsample'):
            residual = self.downsample(x)
            residual = self.gn_proj(residual)
        y = self.relu(self.gn1(self.conv1(x)))
        y = self.relu(self.gn2(self.conv2(y)))
        y = self.gn3(self.conv3(y))
        y = self.relu(residual + y)
        return y

    def load_from(self, weights, n_block, n_unit):
        conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel')],
            conv=True)
        conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel')],
            conv=True)
        conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel')],
            conv=True)
        gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')])
        gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')])
        gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')])
        gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')])
        gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')])
        gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')])
        self.conv1.weight.copy_(conv1_weight)
        self.conv2.weight.copy_(conv2_weight)
        self.conv3.weight.copy_(conv3_weight)
        self.gn1.weight.copy_(gn1_weight.view(-1))
        self.gn1.bias.copy_(gn1_bias.view(-1))
        self.gn2.weight.copy_(gn2_weight.view(-1))
        self.gn2.bias.copy_(gn2_bias.view(-1))
        self.gn3.weight.copy_(gn3_weight.view(-1))
        self.gn3.bias.copy_(gn3_bias.view(-1))
        if hasattr(self, 'downsample'):
            proj_conv_weight = np2th(weights[pjoin(n_block, n_unit,
                'conv_proj/kernel')], conv=True)
            proj_gn_weight = np2th(weights[pjoin(n_block, n_unit,
                'gn_proj/scale')])
            proj_gn_bias = np2th(weights[pjoin(n_block, n_unit,
                'gn_proj/bias')])
            self.downsample.weight.copy_(proj_conv_weight)
            self.gn_proj.weight.copy_(proj_gn_weight.view(-1))
            self.gn_proj.bias.copy_(proj_gn_bias.view(-1))


class ResNetV2(nn.Module):
    """Implementation of Pre-activation (v2) ResNet mode."""

    def __init__(self, block_units, width_factor):
        super().__init__()
        width = int(64 * width_factor)
        self.width = width
        self.root = nn.Sequential(OrderedDict([
            ('conv', StdConv2d(3, width, kernel_size=7, stride=2,
                bias=False, padding=3)),
            ('gn', nn.GroupNorm(32, width, eps=1e-06)),
            ('relu', nn.ReLU(inplace=True)),
            ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
        ]))
        self.body = nn.Sequential(OrderedDict([
            ('block1', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width, cout=width * 4,
                    cmid=width))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width * 4,
                    cout=width * 4, cmid=width))
                 for i in range(2, block_units[0] + 1)]))),
            ('block2', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width * 4, cout=width * 8,
                    cmid=width * 2, stride=2))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width * 8,
                    cout=width * 8, cmid=width * 2))
                 for i in range(2, block_units[1] + 1)]))),
            ('block3', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width * 8, cout=width * 16,
                    cmid=width * 4, stride=2))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width * 16,
                    cout=width * 16, cmid=width * 4))
                 for i in range(2, block_units[2] + 1)]))),
        ]))

    def forward(self, x):
        x = self.root(x)
        x = self.body(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {'block_units': [4, 4, 4], 'width_factor': 4}]
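# The triton_poi_fused_native_group_norm_relu_* kernels below consume the
# per-(sample, group) mean and variance produced by the preceding reduction
# kernels and apply normalize + affine + ReLU in a single pointwise pass.
# A minimal eager-mode reference for that reduction/pointwise kernel pair,
# checked against F.group_norm; the helper name is illustrative, not part
# of the generated code.

import torch
import torch.nn.functional as F


def group_norm_relu_reference(x, weight, bias, num_groups=32, eps=1e-06):
    n, c, h, w = x.shape
    g = x.reshape(n, num_groups, c // num_groups * h * w)
    mean = g.mean(dim=2, keepdim=True)                 # reduction kernel
    var = g.var(dim=2, unbiased=False, keepdim=True)
    y = (g - mean) * torch.rsqrt(var + eps)            # pointwise kernel ...
    y = y.reshape(n, c, h, w) * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
    return F.relu(y)                                   # ... fused with ReLU


x = torch.randn(4, 1024, 4, 4)  # same shape as the group_norm_35 input
wt, bs = torch.randn(1024), torch.randn(1024)
ref = F.relu(F.group_norm(x, 32, wt, bs, eps=1e-06))
assert torch.allclose(group_norm_relu_reference(x, wt, bs), ref, atol=1e-5)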
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from collections import OrderedDict import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 768 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = yindex // 1024 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_5(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 rnumel = 147 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 147, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask & xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 147.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask) @triton.jit def triton_red_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = rindex // 8 tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 262144 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 8192.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x2 = xindex // 262144 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 8192.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = xindex // 256 % 15 x2 = xindex // 3840 % 15 x3 = xindex // 57600 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp5 = tl.load(in_ptr0 + (8192 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp7 = tl.load(in_ptr0 + (8448 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp9 = tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp11 = tl.load(in_ptr0 + (16384 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp13 = tl.load(in_ptr0 + (16640 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp15 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, 
tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + x4, tmp16, xmask) tl.store(out_ptr1 + x4, tmp41, xmask) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_9(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None) @triton.jit def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 225 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r2 = rindex x0 = xindex % 1024 x1 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 1024 * r2 + 230400 * x1), rmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(rmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 225, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 225.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x3, tmp21, None) tl.store(out_ptr0 + x3, tmp10, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1, out_ptr2, 
xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 1800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = rindex // 8 tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 57600 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 1800.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 256 x2 = xindex // 57600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1800.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_14(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 256 rnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2304.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2304 * x0), tmp12, rmask & xmask) @triton.jit def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 7200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = rindex // 32 tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 230400 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 7200.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 230400 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x3, None) tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 225.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = 
tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 7200.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + x3, tmp30, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_poi_fused_add_native_group_norm_relu_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 230400 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 7200.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + x3, tmp17, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_per_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: 
tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 2048 x1 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 2048 * r2 + 131072 * x1), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 64, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 64.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x3, tmp18, None) tl.store(out_ptr0 + x3, tmp8, None) tl.store(out_ptr1 + x3, tmp13, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_21(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_22(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 3600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 16 r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 115200 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 3600.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = xindex // 115200 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 3600.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_24(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 4608.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 4608 * x0), tmp12, rmask & xmask) @triton.jit def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = rindex // 16 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = xindex // 32768 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_27(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = 
tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 512 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 64 r3 = rindex // 64 tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_29(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 2048 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 2048 * x2), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x3, None) tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 64.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 4096.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + x3, tmp30, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 
512 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_31(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 4096.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + x3, tmp17, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy= 'evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask) @triton.jit def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4096 x1 = xindex // 4096 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 65536 * x1), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 16, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 16.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x3, tmp18, None) tl.store(out_ptr0 + x3, tmp8, None) tl.store(out_ptr1 + x3, tmp13, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_34(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask) @triton.jit def triton_red_fused_native_group_norm_35(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = rindex // 32 tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_36(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_37(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 9216.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 9216 * x0), tmp12, rmask & xmask) @triton.jit def triton_per_fused_native_group_norm_38(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 32 r3 = rindex // 32 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_39(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 16384 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 512.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_40(in_out_ptr0, in_ptr0, out_ptr1, xnumel, 
rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 128 r3 = rindex // 128 tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_42(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x3, None) tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 2048.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + x3, tmp30, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask & xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_44(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 2048.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + x3, tmp17, None) @triton.jit def triton_poi_fused_add_native_group_norm_relu_45(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y1 = yindex // 16 y0 = yindex % 16 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + (32 * y1 + x2 // 128), ymask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * y1 + x2 // 128), ymask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 2048.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1, 1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp17, ymask) @triton.jit def triton_poi_fused_threshold_backward_46(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4096 y1 = yindex // 4096 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + (y0 + 4096 * x2 + 65536 * y1), tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121) = args args.clear() assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256,), (1,)) assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_6, (1024,), (1,)) assert_size_stride(primals_7, (1024,), (1,)) assert_size_stride(primals_8, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (256,), (1,)) assert_size_stride(primals_11, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_12, (256,), (1,)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_15, (1024,), (1,)) assert_size_stride(primals_16, (1024,), (1,)) assert_size_stride(primals_17, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_18, (256,), (1,)) assert_size_stride(primals_19, (256,), (1,)) assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_21, (256,), (1,)) assert_size_stride(primals_22, (256,), (1,)) assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_24, (1024,), (1,)) assert_size_stride(primals_25, (1024,), (1,)) assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (256,), (1,)) assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_30, (256,), (1,)) assert_size_stride(primals_31, (256,), (1,)) assert_size_stride(primals_32, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_33, (1024,), (1,)) assert_size_stride(primals_34, (1024,), (1,)) 
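    # NOTE (annotation, not part of the generated output): the
    # *_add_div_sqrt_sub_var_mean_* kernels above (e.g.
    # triton_per_fused_add_div_sqrt_sub_var_mean_40,
    # triton_red_fused_add_div_sqrt_sub_var_mean_30) standardize each conv
    # filter before it is handed to extern_kernels.convolution: per output
    # channel they compute mean and (population) variance over the filter's
    # fan-in -- using a streaming Welford reduction
    # (triton_helpers.welford_reduce) when the fan-in exceeds one block --
    # and emit (w - mean) / sqrt(var + 1e-05). This looks like Weight
    # Standardization as commonly paired with GroupNorm. A minimal eager-mode
    # sketch (the function name is illustrative, not from this file):
    #
    #   def standardize_weight(w, eps=1e-05):
    #       # w: (out_ch, in_ch, kh, kw); reduce over all dims but out_ch
    #       mean = w.mean(dim=(1, 2, 3), keepdim=True)
    #       var = w.var(dim=(1, 2, 3), unbiased=False, keepdim=True)
    #       return (w - mean) / torch.sqrt(var + eps)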
    assert_size_stride(primals_35, (256, 1024, 1, 1), (1024, 1, 1, 1))
    assert_size_stride(primals_36, (256,), (1,))
    assert_size_stride(primals_37, (256,), (1,))
    assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_39, (256,), (1,))
    assert_size_stride(primals_40, (256,), (1,))
    assert_size_stride(primals_41, (1024, 256, 1, 1), (256, 1, 1, 1))
    assert_size_stride(primals_42, (1024,), (1,))
    assert_size_stride(primals_43, (1024,), (1,))
    # 1024 -> 2048 stage: 1x1 shortcut projection, then 512-wide bottleneck
    # units (each conv weight is followed by its GN gamma/beta vectors)
    assert_size_stride(primals_44, (2048, 1024, 1, 1), (1024, 1, 1, 1))
    assert_size_stride(primals_45, (2048,), (1,))
    assert_size_stride(primals_46, (2048,), (1,))
    assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1))
    assert_size_stride(primals_48, (512,), (1,))
    assert_size_stride(primals_49, (512,), (1,))
    assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_51, (512,), (1,))
    assert_size_stride(primals_52, (512,), (1,))
    assert_size_stride(primals_53, (2048, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_54, (2048,), (1,))
    assert_size_stride(primals_55, (2048,), (1,))
    assert_size_stride(primals_56, (512, 2048, 1, 1), (2048, 1, 1, 1))
    assert_size_stride(primals_57, (512,), (1,))
    assert_size_stride(primals_58, (512,), (1,))
    assert_size_stride(primals_59, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_60, (512,), (1,))
    assert_size_stride(primals_61, (512,), (1,))
    assert_size_stride(primals_62, (2048, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_63, (2048,), (1,))
    assert_size_stride(primals_64, (2048,), (1,))
    assert_size_stride(primals_65, (512, 2048, 1, 1), (2048, 1, 1, 1))
    assert_size_stride(primals_66, (512,), (1,))
    assert_size_stride(primals_67, (512,), (1,))
    assert_size_stride(primals_68, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_69, (512,), (1,))
    assert_size_stride(primals_70, (512,), (1,))
    assert_size_stride(primals_71, (2048, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_72, (2048,), (1,))
    assert_size_stride(primals_73, (2048,), (1,))
    assert_size_stride(primals_74, (512, 2048, 1, 1), (2048, 1, 1, 1))
    assert_size_stride(primals_75, (512,), (1,))
    assert_size_stride(primals_76, (512,), (1,))
    assert_size_stride(primals_77, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_78, (512,), (1,))
    assert_size_stride(primals_79, (512,), (1,))
    assert_size_stride(primals_80, (2048, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_81, (2048,), (1,))
    assert_size_stride(primals_82, (2048,), (1,))
    # 2048 -> 4096 stage: 1x1 shortcut projection, then 1024-wide bottleneck
    # units
    assert_size_stride(primals_83, (4096, 2048, 1, 1), (2048, 1, 1, 1))
    assert_size_stride(primals_84, (4096,), (1,))
    assert_size_stride(primals_85, (4096,), (1,))
    assert_size_stride(primals_86, (1024, 2048, 1, 1), (2048, 1, 1, 1))
    assert_size_stride(primals_87, (1024,), (1,))
    assert_size_stride(primals_88, (1024,), (1,))
    assert_size_stride(primals_89, (1024, 1024, 3, 3), (9216, 9, 3, 1))
    assert_size_stride(primals_90, (1024,), (1,))
    assert_size_stride(primals_91, (1024,), (1,))
    assert_size_stride(primals_92, (4096, 1024, 1, 1), (1024, 1, 1, 1))
    assert_size_stride(primals_93, (4096,), (1,))
    assert_size_stride(primals_94, (4096,), (1,))
    assert_size_stride(primals_95, (1024, 4096, 1, 1), (4096, 1, 1, 1))
    assert_size_stride(primals_96, (1024,), (1,))
    assert_size_stride(primals_97, (1024,), (1,))
    assert_size_stride(primals_98, (1024, 1024, 3, 3), (9216, 9, 3, 1))
    assert_size_stride(primals_99, (1024,), (1,))
    assert_size_stride(primals_100, (1024,), (1,))
    assert_size_stride(primals_101, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_102, (4096,), (1,)) assert_size_stride(primals_103, (4096,), (1,)) assert_size_stride(primals_104, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_105, (1024,), (1,)) assert_size_stride(primals_106, (1024,), (1,)) assert_size_stride(primals_107, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_108, (1024,), (1,)) assert_size_stride(primals_109, (1024,), (1,)) assert_size_stride(primals_110, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_111, (4096,), (1,)) assert_size_stride(primals_112, (4096,), (1,)) assert_size_stride(primals_113, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_114, (1024,), (1,)) assert_size_stride(primals_115, (1024,), (1,)) assert_size_stride(primals_116, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_117, (1024,), (1,)) assert_size_stride(primals_118, (1024,), (1,)) assert_size_stride(primals_119, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_120, (4096,), (1,)) assert_size_stride(primals_121, (4096,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch. float32) get_raw_stream(0) triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_11, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_11 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_20, buf3, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_29, buf4, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_29 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_38, buf5, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_38 buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_50, buf6, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_50 buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_59, buf7, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_59 buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_68, buf8, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_68 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_77, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_77 buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_89, buf10, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_89 buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 
9)](primals_98, buf11, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_98 buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_107, buf12, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_107 buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_116, buf13, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_116 buf15 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf17 = reinterpret_tensor(buf15, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf15 buf18 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch. float32) triton_per_fused_add_div_sqrt_sub_var_mean_5[grid(256)](buf17, buf0, buf18, 256, 147, XBLOCK=1, num_warps=2, num_stages=1) buf19 = extern_kernels.convolution(buf1, buf18, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1, 8192, 256)) buf20 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf21 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf23 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_6[grid(128)](buf19, buf20, buf21, buf23, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf24 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256), torch.float32) triton_poi_fused_native_group_norm_relu_7[grid(1048576)](buf19, buf20, buf21, primals_3, primals_4, buf24, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_4 buf25 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) buf26 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(230400)](buf24, buf25, buf26, 230400, XBLOCK=512, num_warps=8, num_stages=1) buf28 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf30 = reinterpret_tensor(buf28, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf28 buf31 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf30, primals_5, buf31, 1024, 256, num_warps=2, num_stages=1) buf32 = extern_kernels.convolution(buf25, buf31, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf33 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) buf34 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) buf36 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) triton_per_fused_native_group_norm_10[grid(4096)](buf32, buf33, buf34, buf36, 4096, 225, XBLOCK=1, num_warps=2, num_stages=1) buf38 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf40 = reinterpret_tensor(buf38, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf38 buf41 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(256)](buf40, primals_8, buf41, 256, 256, num_warps=2, num_stages=1) buf42 = extern_kernels.convolution(buf25, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 256, 15, 15), 
(57600, 1, 3840, 256)) buf43 = buf21 del buf21 buf44 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf46 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_12[grid(128)](buf42, buf43, buf44, buf46, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf47 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf42, buf43, buf44, primals_9, primals_10, buf47, 230400, XBLOCK=512, num_warps=8, num_stages=1) del primals_10 buf49 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf51 = reinterpret_tensor(buf49, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf49 buf52 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf51, buf2, buf52, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf53 = extern_kernels.convolution(buf47, buf52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf54 = buf44 del buf44 buf55 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf57 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_12[grid(128)](buf53, buf54, buf55, buf57, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf58 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf53, buf54, buf55, primals_12, primals_13, buf58, 230400, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf60 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf62 = reinterpret_tensor(buf60, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf60 buf63 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf62, primals_14, buf63, 1024, 256, num_warps=2, num_stages=1) buf64 = extern_kernels.convolution(buf58, buf63, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf65 = buf55 del buf55 buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_15[grid(128)](buf64, buf65, buf66, buf68, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf69 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) buf70 = buf69 del buf69 triton_poi_fused_add_native_group_norm_relu_16[grid(921600)](buf70, buf32, buf33, buf34, primals_6, primals_7, buf64, buf65, buf66, primals_15, primals_16, 921600, XBLOCK=512, num_warps=8, num_stages=1) del primals_16 del primals_7 buf72 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf74 = reinterpret_tensor(buf72, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf72 buf75 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf74, primals_17, buf75, 256, 1024, num_warps=8, num_stages=1) buf76 = extern_kernels.convolution(buf70, buf75, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf76, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf77 = buf66 del buf66 buf78 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf80 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_12[grid(128)](buf76, buf77, buf78, buf80, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf81 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf76, buf77, buf78, primals_18, primals_19, buf81, 230400, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf83 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf85 = reinterpret_tensor(buf83, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf83 buf86 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf85, buf3, buf86, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf87 = extern_kernels.convolution(buf81, buf86, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf87, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf88 = buf78 del buf78 buf89 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf91 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_12[grid(128)](buf87, buf88, buf89, buf91, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf92 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf87, buf88, buf89, primals_21, primals_22, buf92, 230400, XBLOCK=512, num_warps=8, num_stages=1) del primals_22 buf94 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf96 = reinterpret_tensor(buf94, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf94 buf97 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf96, primals_23, buf97, 1024, 256, num_warps=2, num_stages=1) buf98 = extern_kernels.convolution(buf92, buf97, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf98, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf99 = buf89 del buf89 buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf98, buf99, buf100, buf102, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf103 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf70, buf98, buf99, buf100, primals_24, primals_25, buf103, 921600, XBLOCK=512, num_warps=8, num_stages=1) del primals_25 buf105 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf107 = reinterpret_tensor(buf105, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf105 buf108 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf107, primals_26, buf108, 256, 1024, num_warps=8, num_stages=1) buf109 = extern_kernels.convolution(buf103, buf108, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf109, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf110 = buf100 del buf100 buf111 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf109, buf110, buf111, buf113, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf114 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf109, buf110, buf111, primals_27, primals_28, buf114, 230400, XBLOCK= 512, num_warps=8, num_stages=1) del primals_28 buf116 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf118 = reinterpret_tensor(buf116, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf116 buf119 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf118, buf4, buf119, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf120 = extern_kernels.convolution(buf114, buf119, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf120, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf121 = buf111 del buf111 buf122 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), 
torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf120, buf121, buf122, buf124, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf125 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf120, buf121, buf122, primals_30, primals_31, buf125, 230400, XBLOCK= 512, num_warps=8, num_stages=1) del primals_31 buf127 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf129 = reinterpret_tensor(buf127, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf127 buf130 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf129, primals_32, buf130, 1024, 256, num_warps=2, num_stages=1) buf131 = extern_kernels.convolution(buf125, buf130, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf131, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf132 = buf122 del buf122 buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf131, buf132, buf133, buf135, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf136 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf103, buf131, buf132, buf133, primals_33, primals_34, buf136, 921600, XBLOCK=512, num_warps=8, num_stages=1) del primals_34 buf138 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf140 = reinterpret_tensor(buf138, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf138 buf141 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf140, primals_35, buf141, 256, 1024, num_warps=8, num_stages=1) buf142 = extern_kernels.convolution(buf136, buf141, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf142, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf143 = buf133 del buf133 buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf142, buf143, buf144, buf146, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf147 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf142, buf143, buf144, primals_36, primals_37, buf147, 230400, XBLOCK= 512, num_warps=8, num_stages=1) del primals_37 buf149 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf151 = reinterpret_tensor(buf149, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf149 buf152 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf151, buf5, buf152, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf153 = extern_kernels.convolution(buf147, buf152, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf153, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf154 = buf144 del buf144 buf155 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf157 = 
empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf153, buf154, buf155, buf157, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf158 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf153, buf154, buf155, primals_39, primals_40, buf158, 230400, XBLOCK= 512, num_warps=8, num_stages=1) del primals_40 buf160 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf162 = reinterpret_tensor(buf160, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf160 buf163 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf162, primals_41, buf163, 1024, 256, num_warps=2, num_stages=1) buf164 = extern_kernels.convolution(buf158, buf163, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf164, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf165 = buf155 del buf155 buf166 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf168 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf164, buf165, buf166, buf168, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf169 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf136, buf164, buf165, buf166, primals_42, primals_43, buf169, 921600, XBLOCK=512, num_warps=8, num_stages=1) del primals_43 buf171 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf173 = reinterpret_tensor(buf171, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf171 buf174 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_19[grid(2048)](buf173, primals_44, buf174, 2048, 1024, num_warps=8, num_stages=1) buf175 = extern_kernels.convolution(buf169, buf174, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf175, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf176 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) buf177 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) buf179 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) triton_per_fused_native_group_norm_20[grid(8192)](buf175, buf176, buf177, buf179, 8192, 64, XBLOCK=8, num_warps=4, num_stages=1) buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf181 buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_21[grid(512)](buf183, primals_47, buf184, 512, 1024, num_warps=8, num_stages=1) buf185 = extern_kernels.convolution(buf169, buf184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf185, (4, 512, 15, 15), (115200, 1, 7680, 512)) buf186 = buf166 del buf166 buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_22[grid(128)](buf185, buf186, buf187, buf189, 128, 3600, XBLOCK=1, 
RBLOCK=2048, num_warps=16, num_stages=1) buf190 = empty_strided_cuda((4, 512, 15, 15), (115200, 1, 7680, 512 ), torch.float32) triton_poi_fused_native_group_norm_relu_23[grid(460800)](buf185, buf186, buf187, primals_48, primals_49, buf190, 460800, XBLOCK= 512, num_warps=8, num_stages=1) del primals_49 buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf192 buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf194, buf6, buf195, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf197 = buf187 del buf187 buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf196, buf197, buf198, buf200, 128, 1024, num_warps=8, num_stages=1) buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf196, buf197, buf198, primals_51, primals_52, buf201, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_52 buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf203 buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf205, primals_53, buf206, 2048, 512, num_warps=4, num_stages=1) buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf208 = buf198 del buf198 buf209 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf211 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf207, buf208, buf209, buf211, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf212 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) buf213 = buf212 del buf212 triton_poi_fused_add_native_group_norm_relu_29[grid(524288)](buf213, buf175, buf176, buf177, primals_45, primals_46, buf207, buf208, buf209, primals_54, primals_55, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf177 del primals_46 del primals_55 buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf215 buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf217, primals_56, buf218, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf220 = buf209 del buf209 buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf219, buf220, buf221, buf223, 128, 1024, num_warps=8, num_stages=1) buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf219, buf220, buf221, primals_57, primals_58, buf224, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_58 buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf226 buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf228, buf7, buf229, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf231 = buf221 del buf221 buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf230, buf231, buf232, buf234, 128, 1024, num_warps=8, num_stages=1) buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf230, buf231, buf232, primals_60, primals_61, buf235, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_61 buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf237 buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf239, primals_62, buf240, 2048, 512, num_warps=4, num_stages=1) buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf242 = buf232 del buf232 buf243 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf245 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf241, buf242, buf243, buf245, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf246 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf213, buf241, buf242, buf243, primals_63, primals_64, buf246, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_64 buf248 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf250 = reinterpret_tensor(buf248, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf248 buf251 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf250, primals_65, buf251, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf252 = extern_kernels.convolution(buf246, buf251, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf252, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf253 = buf243 del buf243 buf254 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf256 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf252, buf253, buf254, buf256, 128, 1024, num_warps=8, num_stages=1) buf257 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf252, buf253, buf254, primals_66, primals_67, buf257, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_67 buf259 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf261 = reinterpret_tensor(buf259, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf259 buf262 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf261, buf8, buf262, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf263 = extern_kernels.convolution(buf257, buf262, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf263, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf264 = buf254 del buf254 buf265 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf267 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf263, buf264, buf265, buf267, 128, 1024, num_warps=8, num_stages=1) buf268 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf263, buf264, buf265, primals_69, primals_70, buf268, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_70 buf270 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf272 = reinterpret_tensor(buf270, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf270 buf273 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf272, primals_71, buf273, 2048, 512, num_warps=4, num_stages=1) buf274 = extern_kernels.convolution(buf268, buf273, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf274, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf275 = buf265 del buf265 buf276 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf274, buf275, buf276, buf278, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf279 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf246, buf274, buf275, buf276, primals_72, primals_73, buf279, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_73 buf281 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf283 = reinterpret_tensor(buf281, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf281 buf284 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf283, primals_74, buf284, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf285 = extern_kernels.convolution(buf279, buf284, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf285, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf286 = buf276 del buf276 buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf285, buf286, buf287, buf289, 128, 1024, num_warps=8, num_stages=1) buf290 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf285, buf286, buf287, primals_75, primals_76, buf290, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_76 buf292 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf294 = reinterpret_tensor(buf292, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf292 buf295 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf294, buf9, buf295, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf296 = extern_kernels.convolution(buf290, buf295, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf296, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf297 = buf287 del buf287 buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf296, buf297, buf298, buf300, 128, 1024, num_warps=8, num_stages=1) buf301 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf296, buf297, buf298, primals_78, primals_79, buf301, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_79 buf303 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf305 = reinterpret_tensor(buf303, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf303 buf306 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf305, primals_80, buf306, 2048, 512, num_warps=4, num_stages=1) buf307 = extern_kernels.convolution(buf301, buf306, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf307, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf308 = buf298 del buf298 buf309 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf311 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf307, buf308, buf309, buf311, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf312 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf279, buf307, buf308, buf309, primals_81, primals_82, buf312, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_82 buf314 = reinterpret_tensor(buf34, (4096, 1, 1, 1), (1, 4096, 4096, 4096), 0) del buf34 buf316 = reinterpret_tensor(buf314, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf314 buf317 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_32[grid(4096)](buf316, primals_83, buf317, 4096, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf318 = extern_kernels.convolution(buf312, buf317, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf318, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf319 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384 ), torch.float32) buf320 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 
16384 ), torch.float32) buf322 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384 ), torch.float32) triton_per_fused_native_group_norm_33[grid(16384)](buf318, buf319, buf320, buf322, 16384, 16, XBLOCK=32, num_warps=4, num_stages=1) buf324 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf326 = reinterpret_tensor(buf324, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf324 buf327 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_34[grid(1024)](buf326, primals_86, buf327, 1024, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf328 = extern_kernels.convolution(buf312, buf327, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf328, (4, 1024, 8, 8), (65536, 1, 8192, 1024)) buf329 = buf309 del buf309 buf330 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf332 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_35[grid(128)](buf328, buf329, buf330, buf332, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf333 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_36[grid(262144)](buf328, buf329, buf330, primals_87, primals_88, buf333, 262144, XBLOCK= 512, num_warps=8, num_stages=1) del primals_88 buf335 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf337 = reinterpret_tensor(buf335, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf335 buf338 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf337, buf10, buf338, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf339 = extern_kernels.convolution(buf333, buf338, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf339, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf340 = buf330 del buf330 buf341 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf343 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf339, buf340, buf341, buf343, 128, 512, num_warps=4, num_stages=1) buf344 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf339, buf340, buf341, primals_90, primals_91, buf344, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_91 buf346 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf348 = reinterpret_tensor(buf346, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf346 buf349 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf348, primals_92, buf349, 4096, 1024, num_warps=8, num_stages=1) buf350 = extern_kernels.convolution(buf344, buf349, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf350, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf351 = buf341 del buf341 buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf350, buf351, buf352, buf354, 128, 2048, XBLOCK=1, RBLOCK=2048, 
num_warps=16, num_stages=1) buf355 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) buf356 = buf355 del buf355 triton_poi_fused_add_native_group_norm_relu_42[grid(262144)](buf356, buf318, buf319, buf320, primals_84, primals_85, buf350, buf351, buf352, primals_93, primals_94, 262144, XBLOCK=512, num_warps=8, num_stages=1) del buf320 del primals_85 del primals_94 buf358 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf360 = reinterpret_tensor(buf358, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf358 buf361 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf360, primals_95, buf361, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf362 = extern_kernels.convolution(buf356, buf361, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf362, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf363 = buf352 del buf352 buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf362, buf363, buf364, buf366, 128, 512, num_warps=4, num_stages=1) buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf362, buf363, buf364, primals_96, primals_97, buf367, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_97 buf369 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf371 = reinterpret_tensor(buf369, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf369 buf372 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf371, buf11, buf372, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf373, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf374 = buf364 del buf364 buf375 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf377 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf373, buf374, buf375, buf377, 128, 512, num_warps=4, num_stages=1) buf378 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf373, buf374, buf375, primals_99, primals_100, buf378, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_100 buf380 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf382 = reinterpret_tensor(buf380, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf380 buf383 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf382, primals_101, buf383, 4096, 1024, num_warps=8, num_stages=1) buf384 = extern_kernels.convolution(buf378, buf383, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf384, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf385 = buf375 del buf375 buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch 
.float32) triton_red_fused_native_group_norm_41[grid(128)](buf384, buf385, buf386, buf388, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf356, buf384, buf385, buf386, primals_102, primals_103, buf389, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_103 buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf391 buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf393, primals_104, buf394, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf396 = buf386 del buf386 buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf395, buf396, buf397, buf399, 128, 512, num_warps=4, num_stages=1) buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf395, buf396, buf397, primals_105, primals_106, buf400, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_106 buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf402 buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf404, buf12, buf405, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf407 = buf397 del buf397 buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf406, buf407, buf408, buf410, 128, 512, num_warps=4, num_stages=1) buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf406, buf407, buf408, primals_108, primals_109, buf411, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_109 buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf413 buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf415, primals_110, buf416, 4096, 1024, num_warps=8, num_stages=1) buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf418 = buf408 del buf408 buf419 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf421 = empty_strided_cuda((4, 
32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf417, buf418, buf419, buf421, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf422 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf389, buf417, buf418, buf419, primals_111, primals_112, buf422, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_112 buf424 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf426 = reinterpret_tensor(buf424, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf424 buf427 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf426, primals_113, buf427, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf428 = extern_kernels.convolution(buf422, buf427, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf428, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf429 = buf419 del buf419 buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf428, buf429, buf430, buf432, 128, 512, num_warps=4, num_stages=1) buf433 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf428, buf429, buf430, primals_114, primals_115, buf433, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_115 buf435 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf437 = reinterpret_tensor(buf435, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf435 buf438 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf437, buf13, buf438, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf439, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf440 = buf430 del buf430 buf441 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf443 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf439, buf440, buf441, buf443, 128, 512, num_warps=4, num_stages=1) buf444 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf439, buf440, buf441, primals_117, primals_118, buf444, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_118 buf446 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf448 = reinterpret_tensor(buf446, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf446 buf449 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf448, primals_119, buf449, 4096, 1024, num_warps=8, num_stages=1) buf450 = extern_kernels.convolution(buf444, buf449, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf450, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf451 = buf441 del buf441 buf452 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch 
.float32) buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf450, buf451, buf452, buf454, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf455 = empty_strided_cuda((4, 4096, 4, 4), (65536, 16, 4, 1), torch.float32) triton_poi_fused_add_native_group_norm_relu_45[grid(64, 4096)](buf422, buf450, buf451, buf452, primals_120, primals_121, buf455, 64, 4096, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del buf452 del primals_121 buf456 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.bool) triton_poi_fused_threshold_backward_46[grid(16384, 16)](buf455, buf456, 16384, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) return (buf455, buf0, buf1, primals_3, primals_5, primals_6, primals_8, primals_9, buf2, primals_12, primals_14, primals_15, primals_17, primals_18, buf3, primals_21, primals_23, primals_24, primals_26, primals_27, buf4, primals_30, primals_32, primals_33, primals_35, primals_36, buf5, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, buf6, primals_51, primals_53, primals_54, primals_56, primals_57, buf7, primals_60, primals_62, primals_63, primals_65, primals_66, buf8, primals_69, primals_71, primals_72, primals_74, primals_75, buf9, primals_78, primals_80, primals_81, primals_83, primals_84, primals_86, primals_87, buf10, primals_90, primals_92, primals_93, primals_95, primals_96, buf11, primals_99, primals_101, primals_102, primals_104, primals_105, buf12, primals_108, primals_110, primals_111, primals_113, primals_114, buf13, primals_117, primals_119, primals_120, buf17, buf18, buf19, reinterpret_tensor(buf20, (4, 32), (32, 1), 0), reinterpret_tensor(buf23, (4, 32), (32, 1), 0), buf24, buf25, buf26, buf30, buf31, buf32, reinterpret_tensor(buf33, (4, 1024), (1024, 1), 0), reinterpret_tensor(buf36, (4, 1024), (1024, 1), 0), buf40, buf41, buf42, reinterpret_tensor(buf43, (4, 32), (32, 1), 0), reinterpret_tensor(buf46, (4, 32), (32, 1), 0), buf47, buf51, buf52, buf53, reinterpret_tensor(buf54, (4, 32), (32, 1), 0), reinterpret_tensor(buf57, (4, 32), (32, 1), 0), buf58, buf62, buf63, buf64, reinterpret_tensor(buf65, (4, 32), (32, 1), 0), reinterpret_tensor(buf68, (4, 32), (32, 1), 0), buf70, buf74, buf75, buf76, reinterpret_tensor(buf77, (4, 32), (32, 1), 0), reinterpret_tensor(buf80, (4, 32), (32, 1), 0), buf81, buf85, buf86, buf87, reinterpret_tensor(buf88, (4, 32), (32, 1), 0), reinterpret_tensor(buf91, (4, 32), (32, 1), 0), buf92, buf96, buf97, buf98, reinterpret_tensor(buf99, (4, 32), (32, 1), 0), reinterpret_tensor(buf102, (4, 32), (32, 1), 0), buf103, buf107, buf108, buf109, reinterpret_tensor(buf110, (4, 32), (32, 1), 0), reinterpret_tensor(buf113, (4, 32), (32, 1), 0), buf114, buf118, buf119, buf120, reinterpret_tensor(buf121, (4, 32), (32, 1), 0), reinterpret_tensor(buf124, (4, 32), (32, 1), 0), buf125, buf129, buf130, buf131, reinterpret_tensor(buf132, (4, 32), (32, 1), 0), reinterpret_tensor(buf135, (4, 32), (32, 1), 0), buf136, buf140, buf141, buf142, reinterpret_tensor(buf143, (4, 32), (32, 1), 0), reinterpret_tensor(buf146, (4, 32), (32, 1), 0), buf147, buf151, buf152, buf153, reinterpret_tensor(buf154, (4, 32), (32, 1), 0), reinterpret_tensor(buf157, (4, 32), (32, 1), 0), buf158, buf162, buf163, buf164, reinterpret_tensor(buf165, (4, 32), (32, 1), 0), reinterpret_tensor(buf168, (4, 32), (32, 1), 0), buf169, buf173, buf174, buf175, reinterpret_tensor(buf176, (4, 2048), (2048, 1), 0), reinterpret_tensor(buf179, (4, 
2048), (2048, 1), 0), buf183, buf184, buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0), reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194, buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0), reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205, buf206, buf207, reinterpret_tensor(buf208, (4, 32), (32, 1), 0), reinterpret_tensor(buf211, (4, 32), (32, 1), 0), buf213, buf217, buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228, buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0), reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239, buf240, buf241, reinterpret_tensor(buf242, (4, 32), (32, 1), 0), reinterpret_tensor(buf245, (4, 32), (32, 1), 0), buf246, buf250, buf251, buf252, reinterpret_tensor(buf253, (4, 32), (32, 1), 0), reinterpret_tensor(buf256, (4, 32), (32, 1), 0), buf257, buf261, buf262, buf263, reinterpret_tensor(buf264, (4, 32), (32, 1), 0), reinterpret_tensor(buf267, (4, 32), (32, 1), 0), buf268, buf272, buf273, buf274, reinterpret_tensor(buf275, (4, 32), (32, 1), 0), reinterpret_tensor(buf278, (4, 32), (32, 1), 0), buf279, buf283, buf284, buf285, reinterpret_tensor(buf286, (4, 32), (32, 1), 0), reinterpret_tensor(buf289, (4, 32), (32, 1), 0), buf290, buf294, buf295, buf296, reinterpret_tensor(buf297, (4, 32), (32, 1), 0), reinterpret_tensor(buf300, (4, 32), (32, 1), 0), buf301, buf305, buf306, buf307, reinterpret_tensor(buf308, (4, 32), (32, 1), 0), reinterpret_tensor(buf311, (4, 32), (32, 1), 0), buf312, buf316, buf317, buf318, reinterpret_tensor(buf319, (4, 4096), (4096, 1), 0), reinterpret_tensor(buf322, (4, 4096), (4096, 1), 0), buf326, buf327, buf328, reinterpret_tensor(buf329, (4, 32), (32, 1), 0), reinterpret_tensor(buf332, (4, 32), (32, 1), 0), buf333, buf337, buf338, buf339, reinterpret_tensor(buf340, (4, 32), (32, 1), 0), reinterpret_tensor(buf343, (4, 32), (32, 1), 0), buf344, buf348, buf349, buf350, reinterpret_tensor(buf351, (4, 32), (32, 1), 0), reinterpret_tensor(buf354, (4, 32), (32, 1), 0), buf356, buf360, buf361, buf362, reinterpret_tensor(buf363, (4, 32), (32, 1), 0), reinterpret_tensor(buf366, (4, 32), (32, 1), 0), buf367, buf371, buf372, buf373, reinterpret_tensor(buf374, (4, 32), (32, 1), 0), reinterpret_tensor(buf377, (4, 32), (32, 1), 0), buf378, buf382, buf383, buf384, reinterpret_tensor(buf385, (4, 32), (32, 1), 0), reinterpret_tensor(buf388, (4, 32), (32, 1), 0), buf389, buf393, buf394, buf395, reinterpret_tensor(buf396, (4, 32), (32, 1), 0), reinterpret_tensor(buf399, (4, 32), (32, 1), 0), buf400, buf404, buf405, buf406, reinterpret_tensor(buf407, (4, 32), (32, 1), 0), reinterpret_tensor(buf410, (4, 32), (32, 1), 0), buf411, buf415, buf416, buf417, reinterpret_tensor(buf418, (4, 32), (32, 1), 0), reinterpret_tensor(buf421, (4, 32), (32, 1), 0), buf422, buf426, buf427, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0), reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437, buf438, buf439, reinterpret_tensor(buf440, (4, 32), (32, 1), 0), reinterpret_tensor(buf443, (4, 32), (32, 1), 0), buf444, buf448, buf449, buf450, reinterpret_tensor(buf451, (4, 32), (32, 1), 0), reinterpret_tensor(buf454, (4, 32), (32, 1), 0), buf456) def conv1x1(cin, cout, stride=1, bias=False): return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0, bias=bias) def conv3x3(cin, cout, stride=1, groups=1, bias=False): return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=bias, groups=groups) def 
np2th(weights, conv=False): """Possibly convert HWIO to OIHW.""" if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights) class StdConv2d(nn.Conv2d): def forward(self, x): w = self.weight v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) w = (w - m) / torch.sqrt(v + 1e-05) return F.conv2d(x, w, self.bias, self.stride, self.padding, self. dilation, self.groups) class PreActBottleneck(nn.Module): """Pre-activation (v2) bottleneck block. """ def __init__(self, cin, cout=None, cmid=None, stride=1): super().__init__() cout = cout or cin cmid = cmid or cout // 4 self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06) self.conv1 = conv1x1(cin, cmid, bias=False) self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06) self.conv2 = conv3x3(cmid, cmid, stride, bias=False) self.gn3 = nn.GroupNorm(32, cout, eps=1e-06) self.conv3 = conv1x1(cmid, cout, bias=False) self.relu = nn.ReLU(inplace=True) if stride != 1 or cin != cout: self.downsample = conv1x1(cin, cout, stride, bias=False) self.gn_proj = nn.GroupNorm(cout, cout) def forward(self, x): residual = x if hasattr(self, 'downsample'): residual = self.downsample(x) residual = self.gn_proj(residual) y = self.relu(self.gn1(self.conv1(x))) y = self.relu(self.gn2(self.conv2(y))) y = self.gn3(self.conv3(y)) y = self.relu(residual + y) return y def load_from(self, weights, n_block, n_unit): conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel') ], conv=True) conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel') ], conv=True) conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel') ], conv=True) gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')]) gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')]) gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')]) gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')]) gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')]) gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')]) self.conv1.weight.copy_(conv1_weight) self.conv2.weight.copy_(conv2_weight) self.conv3.weight.copy_(conv3_weight) self.gn1.weight.copy_(gn1_weight.view(-1)) self.gn1.bias.copy_(gn1_bias.view(-1)) self.gn2.weight.copy_(gn2_weight.view(-1)) self.gn2.bias.copy_(gn2_bias.view(-1)) self.gn3.weight.copy_(gn3_weight.view(-1)) self.gn3.bias.copy_(gn3_bias.view(-1)) if hasattr(self, 'downsample'): proj_conv_weight = np2th(weights[pjoin(n_block, n_unit, 'conv_proj/kernel')], conv=True) proj_gn_weight = np2th(weights[pjoin(n_block, n_unit, 'gn_proj/scale')]) proj_gn_bias = np2th(weights[pjoin(n_block, n_unit, 'gn_proj/bias')]) self.downsample.weight.copy_(proj_conv_weight) self.gn_proj.weight.copy_(proj_gn_weight.view(-1)) self.gn_proj.bias.copy_(proj_gn_bias.view(-1)) class ResNetV2New(nn.Module): """Implementation of Pre-activation (v2) ResNet mode.""" def __init__(self, block_units, width_factor): super().__init__() width = int(64 * width_factor) self.width = width self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn. 
GroupNorm(32, width, eps=1e-06)), ('relu', nn.ReLU(inplace=True )), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))])) self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential( OrderedDict([('unit1', PreActBottleneck(cin=width, cout=width * 4, cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 4, cout=width * 4, cmid=width)) for i in range(2, block_units[0 ] + 1)]))), ('block2', nn.Sequential(OrderedDict([('unit1', PreActBottleneck(cin=width * 4, cout=width * 8, cmid=width * 2, stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 8, cout=width * 8, cmid=width * 2)) for i in range(2, block_units[ 1] + 1)]))), ('block3', nn.Sequential(OrderedDict([('unit1', PreActBottleneck(cin=width * 8, cout=width * 16, cmid=width * 4, stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 16, cout=width * 16, cmid=width * 4)) for i in range(2, block_units [2] + 1)])))])) def forward(self, input_0): primals_1 = self.root.conv.weight primals_3 = self.root.gn.weight primals_4 = self.root.gn.bias primals_9 = self.body.block1.unit1.gn1.weight primals_10 = self.body.block1.unit1.gn1.bias primals_8 = self.body.block1.unit1.conv1.weight primals_12 = self.body.block1.unit1.gn2.weight primals_13 = self.body.block1.unit1.gn2.bias primals_11 = self.body.block1.unit1.conv2.weight primals_6 = self.body.block1.unit1.gn3.weight primals_7 = self.body.block1.unit1.gn3.bias primals_5 = self.body.block1.unit1.conv3.weight primals_14 = self.body.block1.unit1.downsample.weight primals_15 = self.body.block1.unit1.gn_proj.weight primals_16 = self.body.block1.unit1.gn_proj.bias primals_18 = self.body.block1.unit2.gn1.weight primals_19 = self.body.block1.unit2.gn1.bias primals_17 = self.body.block1.unit2.conv1.weight primals_21 = self.body.block1.unit2.gn2.weight primals_22 = self.body.block1.unit2.gn2.bias primals_20 = self.body.block1.unit2.conv2.weight primals_24 = self.body.block1.unit2.gn3.weight primals_25 = self.body.block1.unit2.gn3.bias primals_23 = self.body.block1.unit2.conv3.weight primals_27 = self.body.block1.unit3.gn1.weight primals_28 = self.body.block1.unit3.gn1.bias primals_26 = self.body.block1.unit3.conv1.weight primals_30 = self.body.block1.unit3.gn2.weight primals_31 = self.body.block1.unit3.gn2.bias primals_29 = self.body.block1.unit3.conv2.weight primals_33 = self.body.block1.unit3.gn3.weight primals_34 = self.body.block1.unit3.gn3.bias primals_32 = self.body.block1.unit3.conv3.weight primals_36 = self.body.block1.unit4.gn1.weight primals_37 = self.body.block1.unit4.gn1.bias primals_35 = self.body.block1.unit4.conv1.weight primals_39 = self.body.block1.unit4.gn2.weight primals_40 = self.body.block1.unit4.gn2.bias primals_38 = self.body.block1.unit4.conv2.weight primals_42 = self.body.block1.unit4.gn3.weight primals_43 = self.body.block1.unit4.gn3.bias primals_41 = self.body.block1.unit4.conv3.weight primals_48 = self.body.block2.unit1.gn1.weight primals_49 = self.body.block2.unit1.gn1.bias primals_47 = self.body.block2.unit1.conv1.weight primals_51 = self.body.block2.unit1.gn2.weight primals_52 = self.body.block2.unit1.gn2.bias primals_50 = self.body.block2.unit1.conv2.weight primals_45 = self.body.block2.unit1.gn3.weight primals_46 = self.body.block2.unit1.gn3.bias primals_53 = self.body.block2.unit1.conv3.weight primals_44 = self.body.block2.unit1.downsample.weight primals_54 = self.body.block2.unit1.gn_proj.weight primals_55 = self.body.block2.unit1.gn_proj.bias primals_57 = self.body.block2.unit2.gn1.weight primals_58 = self.body.block2.unit2.gn1.bias primals_56 = 
self.body.block2.unit2.conv1.weight primals_60 = self.body.block2.unit2.gn2.weight primals_61 = self.body.block2.unit2.gn2.bias primals_59 = self.body.block2.unit2.conv2.weight primals_63 = self.body.block2.unit2.gn3.weight primals_64 = self.body.block2.unit2.gn3.bias primals_62 = self.body.block2.unit2.conv3.weight primals_66 = self.body.block2.unit3.gn1.weight primals_67 = self.body.block2.unit3.gn1.bias primals_65 = self.body.block2.unit3.conv1.weight primals_69 = self.body.block2.unit3.gn2.weight primals_70 = self.body.block2.unit3.gn2.bias primals_68 = self.body.block2.unit3.conv2.weight primals_72 = self.body.block2.unit3.gn3.weight primals_73 = self.body.block2.unit3.gn3.bias primals_71 = self.body.block2.unit3.conv3.weight primals_75 = self.body.block2.unit4.gn1.weight primals_76 = self.body.block2.unit4.gn1.bias primals_74 = self.body.block2.unit4.conv1.weight primals_78 = self.body.block2.unit4.gn2.weight primals_79 = self.body.block2.unit4.gn2.bias primals_77 = self.body.block2.unit4.conv2.weight primals_81 = self.body.block2.unit4.gn3.weight primals_82 = self.body.block2.unit4.gn3.bias primals_80 = self.body.block2.unit4.conv3.weight primals_87 = self.body.block3.unit1.gn1.weight primals_88 = self.body.block3.unit1.gn1.bias primals_86 = self.body.block3.unit1.conv1.weight primals_90 = self.body.block3.unit1.gn2.weight primals_91 = self.body.block3.unit1.gn2.bias primals_89 = self.body.block3.unit1.conv2.weight primals_84 = self.body.block3.unit1.gn3.weight primals_85 = self.body.block3.unit1.gn3.bias primals_92 = self.body.block3.unit1.conv3.weight primals_83 = self.body.block3.unit1.downsample.weight primals_93 = self.body.block3.unit1.gn_proj.weight primals_94 = self.body.block3.unit1.gn_proj.bias primals_96 = self.body.block3.unit2.gn1.weight primals_97 = self.body.block3.unit2.gn1.bias primals_95 = self.body.block3.unit2.conv1.weight primals_99 = self.body.block3.unit2.gn2.weight primals_100 = self.body.block3.unit2.gn2.bias primals_98 = self.body.block3.unit2.conv2.weight primals_102 = self.body.block3.unit2.gn3.weight primals_103 = self.body.block3.unit2.gn3.bias primals_101 = self.body.block3.unit2.conv3.weight primals_105 = self.body.block3.unit3.gn1.weight primals_106 = self.body.block3.unit3.gn1.bias primals_104 = self.body.block3.unit3.conv1.weight primals_108 = self.body.block3.unit3.gn2.weight primals_109 = self.body.block3.unit3.gn2.bias primals_107 = self.body.block3.unit3.conv2.weight primals_111 = self.body.block3.unit3.gn3.weight primals_112 = self.body.block3.unit3.gn3.bias primals_110 = self.body.block3.unit3.conv3.weight primals_114 = self.body.block3.unit4.gn1.weight primals_115 = self.body.block3.unit4.gn1.bias primals_113 = self.body.block3.unit4.conv1.weight primals_117 = self.body.block3.unit4.gn2.weight primals_118 = self.body.block3.unit4.gn2.bias primals_116 = self.body.block3.unit4.conv2.weight primals_120 = self.body.block3.unit4.gn3.weight primals_121 = self.body.block3.unit4.gn3.bias primals_119 = self.body.block3.unit4.conv3.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, 
primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121]) return output[0]
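The repeated triton_red_fused_add_div_sqrt_sub_var_mean_* kernels above implement StdConv2d's weight standardization: each conv weight is centered and scaled by its own per-output-channel mean and variance in a single fused reduction before being handed to extern_kernels.convolution. A minimal eager sketch of that step (shapes are illustrative and not tied to the generated buffers):

import torch

# Hypothetical 3x3 conv weight; 512 output channels, as in buf229/buf262 above.
w = torch.randn(512, 512, 3, 3)
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w_std = (w - m) / torch.sqrt(v + 1e-05)
# After standardization each output channel has ~zero mean and ~unit variance.
assert torch.allclose(w_std.mean(dim=[1, 2, 3]), torch.zeros(512), atol=1e-4)
assert torch.allclose(w_std.var(dim=[1, 2, 3], unbiased=False), torch.ones(512), atol=1e-3)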
Yifanfanfanfan/ViT-pytorch
ResNetV2
false
12309
[ "MIT" ]
0
0f975aa7d3fd0aba6f74260c2b5a91786f1211ba
https://github.com/Yifanfanfanfan/ViT-pytorch/tree/0f975aa7d3fd0aba6f74260c2b5a91786f1211ba
NegSamplingLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/vz/cvzebhzwnxtfxcmfn6cjxhquzwxdt74jkr6hqhnry63fhgk7rgi5.py # Topologically Sorted Source Nodes: [mul, sigmoid, mean, neg], Original ATen: [aten.mul, aten.sigmoid, aten.mean, aten.neg] # Source node to ATen node mapping: # mean => mean # mul => mul # neg => neg # sigmoid => sigmoid # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sigmoid,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) triton_per_fused_mean_mul_neg_sigmoid_0 = async_compile.triton('triton_per_fused_mean_mul_neg_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_neg_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mul_neg_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = -tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, sigmoid, mean, neg], Original ATen: [aten.mul, aten.sigmoid, aten.mean, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_mean_mul_neg_sigmoid_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
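The kernel above is a single-program persistent reduction: grid(1) launches one program that loads all 256 elements, reduces them, and stores one scalar. A hand-written distillation of the same pattern (a sketch assuming a CUDA device and a power-of-two element count; the kernel name and wrapper are illustrative, not part of the generated file):

import torch
import triton
import triton.language as tl

@triton.jit
def _neg_mean_sigmoid(x_ptr, y_ptr, out_ptr, N: tl.constexpr):
    # One program covers the whole input, matching grid(1) above.
    offs = tl.arange(0, N)
    x = tl.load(x_ptr + offs)
    y = tl.load(y_ptr + offs)
    acc = tl.sum(tl.sigmoid(x * y), axis=0)
    tl.store(out_ptr, -(acc / N))

score = torch.rand(4, 4, 4, 4, device='cuda')
sign = torch.rand(4, 4, 4, 4, device='cuda')
out = torch.empty((), device='cuda')
_neg_mean_sigmoid[(1,)](score, sign, out, N=256)
torch.testing.assert_close(out, -torch.sigmoid(sign * score).mean())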
import torch import torch.nn as nn class NegSamplingLoss(nn.Module): def __init__(self): super(NegSamplingLoss, self).__init__() def forward(self, score, sign): return -torch.mean(torch.sigmoid(sign * score)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
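A quick numeric sanity check of the definition (hypothetical inputs): a score of zero gives sigmoid(0) = 0.5 regardless of sign, so the loss is exactly -0.5.

import torch

loss = NegSamplingLoss()
score = torch.zeros(4, 4, 4, 4)
sign = torch.ones(4, 4, 4, 4)
print(loss(score, sign))  # tensor(-0.5000)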
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_neg_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = -tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_mul_neg_sigmoid_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class NegSamplingLossNew(nn.Module): def __init__(self): super(NegSamplingLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
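A hedged equivalence check between the generated wrapper and the eager definition (assumes a CUDA device and that both classes above are in scope; inputs are random):

import torch

score = torch.rand(4, 4, 4, 4, device='cuda')
sign = torch.rand(4, 4, 4, 4, device='cuda')
eager = -torch.mean(torch.sigmoid(sign * score))
compiled = NegSamplingLossNew()(score, sign)
torch.testing.assert_close(compiled, eager)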
dreaming-qin/RecBole
NegSamplingLoss
false
12310
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
InnerProductLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/2q/c2qgjmxnbjif6ni7qy2qblvtozrwd2f32jo4hex3rdtryfymbyul.py # Topologically Sorted Source Nodes: [mul, pos_score, neg, softplus, mul_1, neg_score, softplus_1, add, mean], Original ATen: [aten.mul, aten.sum, aten.neg, aten.softplus, aten.add, aten.mean] # Source node to ATen node mapping: # add => add # mean => mean # mul => mul # mul_1 => mul_1 # neg => neg # neg_score => sum_2 # pos_score => sum_1 # softplus => exp, gt, log1p, where # softplus_1 => exp_1, gt_1, log1p_1, where_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %neg : [num_users=3] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%neg, 20), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %neg, %log1p), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg2_1), kwargs = {}) # %sum_2 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_2, 20), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sum_2,), kwargs = {}) # %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %sum_2, %log1p_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %where_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {}) triton_per_fused_add_mean_mul_neg_softplus_sum_0 = 
async_compile.triton('triton_per_fused_add_mean_mul_neg_softplus_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_neg_softplus_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_neg_softplus_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp4 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp7 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp11 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp12 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp16 = tl.load(in_ptr2 + (r0 + (64*r1)), None) tmp18 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None) tmp21 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None) tmp24 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = -tmp14 tmp17 = tmp0 * tmp16 tmp19 = tmp3 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp7 * tmp21 tmp23 = tmp20 + tmp22 tmp25 = tmp11 * tmp24 tmp26 = tmp23 + tmp25 tmp27 = 20.0 tmp28 = tmp15 > tmp27 tmp29 = tl_math.exp(tmp15) tmp30 = libdevice.log1p(tmp29) tmp31 = tl.where(tmp28, tmp15, tmp30) tmp32 = tmp26 > tmp27 tmp33 = tl_math.exp(tmp26) tmp34 = libdevice.log1p(tmp33) tmp35 = tl.where(tmp32, tmp26, tmp34) tmp36 = tmp31 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 64.0 tmp41 = tmp39 / tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp41, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 
= args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [mul, pos_score, neg, softplus, mul_1, neg_score, softplus_1, add, mean], Original ATen: [aten.mul, aten.sum, aten.neg, aten.softplus, aten.add, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_mean_mul_neg_softplus_sum_0.run(buf3, arg1_1, arg0_1, arg2_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
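The where(x > 20, x, log1p(exp(x))) branches in the kernel are the numerically stable softplus: past the threshold the log1p correction (about e^-x) falls below float32 resolution, so returning x is exact and the kernel never evaluates a large exp. A small eager sketch of the same rule (helper name is illustrative):

import torch
import torch.nn.functional as F

def softplus_stable(x, threshold=20.0):
    # Same rule as F.softplus and the fused kernel above.
    return torch.where(x > threshold, x, torch.log1p(torch.exp(x)))

x = torch.tensor([-5.0, 0.0, 25.0])
torch.testing.assert_close(softplus_stable(x), F.softplus(x))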
import torch import torch.nn as nn import torch.nn.functional as F class InnerProductLoss(nn.Module): """This is the inner-product loss used in CFKG for optimization. """ def __init__(self): super(InnerProductLoss, self).__init__() def forward(self, anchor, positive, negative): pos_score = torch.mul(anchor, positive).sum(dim=1) neg_score = torch.mul(anchor, negative).sum(dim=1) return (F.softplus(-pos_score) + F.softplus(neg_score)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
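A worked example of the objective (hypothetical, well-separated embeddings): a strongly positive anchor-positive product and a strongly negative anchor-negative product push both softplus terms, and hence the loss, toward zero.

import torch

loss = InnerProductLoss()
anchor = torch.ones(2, 8)
positive = 10 * torch.ones(2, 8)   # pos_score = 80 per sample
negative = -10 * torch.ones(2, 8)  # neg_score = -80 per sample
print(loss(anchor, positive, negative))  # ~0: softplus(-80) + softplus(-80)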
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_mul_neg_softplus_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp7 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp11 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp16 = tl.load(in_ptr2 + (r0 + 64 * r1), None) tmp18 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None) tmp21 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None) tmp24 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = -tmp14 tmp17 = tmp0 * tmp16 tmp19 = tmp3 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp7 * tmp21 tmp23 = tmp20 + tmp22 tmp25 = tmp11 * tmp24 tmp26 = tmp23 + tmp25 tmp27 = 20.0 tmp28 = tmp15 > tmp27 tmp29 = tl_math.exp(tmp15) tmp30 = libdevice.log1p(tmp29) tmp31 = tl.where(tmp28, tmp15, tmp30) tmp32 = tmp26 > tmp27 tmp33 = tl_math.exp(tmp26) tmp34 = libdevice.log1p(tmp33) tmp35 = tl.where(tmp32, tmp26, tmp34) tmp36 = tmp31 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 64.0 tmp41 = tmp39 / tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp41, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 get_raw_stream(0) triton_per_fused_add_mean_mul_neg_softplus_sum_0[grid(1)](buf3, arg1_1, arg0_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf3, class InnerProductLossNew(nn.Module): """This is the inner-product loss used in CFKG for optimization. """ def __init__(self): super(InnerProductLossNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
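A hedged equivalence check against the eager definition (assumes a CUDA device and that InnerProductLossNew is in scope). Note the argument binding in the generated forward: input_1 is bound to arg1_1, which the kernel multiplies into both scores, so it plays the anchor role.

import torch
import torch.nn.functional as F

anchor = torch.rand(4, 4, 4, 4, device='cuda')
positive = torch.rand(4, 4, 4, 4, device='cuda')
negative = torch.rand(4, 4, 4, 4, device='cuda')
eager = (F.softplus(-(anchor * positive).sum(dim=1)) +
         F.softplus((anchor * negative).sum(dim=1))).mean()
compiled = InnerProductLossNew()(positive, anchor, negative)
torch.testing.assert_close(compiled, eager)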
dreaming-qin/RecBole
InnerProductLoss
false
12,311
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
InnerProductLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/cw/ccwvutm6zl37xd5gsiqvlmjxykaqval6ja4w7xuw5xctwfn5antt.py # Topologically Sorted Source Nodes: [p, q, inner_product, sum_1], Original ATen: [aten.index, aten.mul, aten.sum] # Source node to ATen node mapping: # inner_product => mul # p => index # q => index_1 # sum_1 => sum_1 # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy_1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %index_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) triton_poi_fused_index_mul_sum_0 = async_compile.triton('triton_poi_fused_index_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 6 x0 = xindex % 4 x2 = (xindex // 24) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + ((4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp18 = tl.where(tmp6, tmp5, tmp1) tmp19 = tl.where(tmp4, tmp3, tmp18) tmp20 = tl.where(tmp13, tmp1, tmp1) tmp21 = tl.where(tmp11, tmp5, tmp20) tmp22 = tl.where(tmp2, tmp19, tmp21) tmp23 = tl.load(in_ptr0 + ((4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp24 = tmp17 * tmp23 tmp25 = tl.load(in_ptr0 + (1 + (4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + (1 + (4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp27 = tmp25 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tl.load(in_ptr0 + (2 + (4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (2 + (4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tl.load(in_ptr0 + (3 + (4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (3 + (4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tl.store(out_ptr0 + (x3), tmp36, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p, q, inner_product, sum_1], Original ATen: [aten.index, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_index_mul_sum_0.run(arg0_1, buf0, 96, grid=grid(96), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class InnerProductLayer(nn.Module):
    """InnerProduct Layer used in PNN that computes the element-wise product or
    inner product between feature vectors.
    """

    def __init__(self, num_feature_field, device):
        """
        Args:
            num_feature_field(int): number of feature fields.
            device(torch.device): device object of the model.
        """
        super(InnerProductLayer, self).__init__()
        self.num_feature_field = num_feature_field
        self.device = device

    def forward(self, feat_emb):
        """
        Args:
            feat_emb(torch.FloatTensor): 3D tensor with shape: [batch_size, num_feature_field, embedding_size].

        Returns:
            inner_product(torch.FloatTensor): The inner product of the input tensor, with shape [batch_size, num_pairs].
        """
        row = []
        col = []
        for i in range(self.num_feature_field - 1):
            for j in range(i + 1, self.num_feature_field):
                row.append(i)
                col.append(j)
        p = feat_emb[:, row]
        q = feat_emb[:, col]
        inner_product = p * q
        return inner_product.sum(dim=-1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_feature_field': 4, 'device': 0}]
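An illustrative sketch of the layer above (not part of the record): with num_feature_field = 4 it enumerates 4 * 3 / 2 = 6 (i, j) field pairs and reduces only the trailing dimension.

# Sketch: pair count and output shapes for InnerProductLayer.
import torch

layer = InnerProductLayer(num_feature_field=4, device=torch.device('cpu'))
print(layer(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 6, 4]), as in get_inputs()
print(layer(torch.rand(2, 4, 8)).shape)     # torch.Size([2, 6]) for the documented 3D input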
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 96
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 6
    x0 = xindex % 4
    x2 = xindex // 24
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 3, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 2, tl.int64)
    tmp6 = tmp0 < tmp5
    tmp7 = tl.full([1], 0, tl.int64)
    tmp8 = tl.where(tmp6, tmp7, tmp7)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tl.full([1], 4, tl.int64)
    tmp11 = tmp0 < tmp10
    tmp12 = tl.full([1], 5, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tl.where(tmp13, tmp3, tmp5)
    tmp15 = tl.where(tmp11, tmp3, tmp14)
    tmp16 = tl.where(tmp2, tmp9, tmp15)
    tmp17 = tl.load(in_ptr0 + (4 * x0 + 16 * tmp16 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp18 = tl.where(tmp6, tmp5, tmp1)
    tmp19 = tl.where(tmp4, tmp3, tmp18)
    tmp20 = tl.where(tmp13, tmp1, tmp1)
    tmp21 = tl.where(tmp11, tmp5, tmp20)
    tmp22 = tl.where(tmp2, tmp19, tmp21)
    tmp23 = tl.load(in_ptr0 + (4 * x0 + 16 * tmp22 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp24 = tmp17 * tmp23
    tmp25 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp27 = tmp25 * tmp26
    tmp28 = tmp24 + tmp27
    tmp29 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp31 = tmp29 * tmp30
    tmp32 = tmp28 + tmp31
    tmp33 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp35 = tmp33 * tmp34
    tmp36 = tmp32 + tmp35
    tl.store(out_ptr0 + x3, tmp36, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_index_mul_sum_0[grid(96)](arg0_1, buf0, 96,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class InnerProductLayerNew(nn.Module):
    """InnerProduct Layer used in PNN that computes the element-wise product or
    inner product between feature vectors.
    """

    def __init__(self, num_feature_field, device):
        """
        Args:
            num_feature_field(int): number of feature fields.
            device(torch.device): device object of the model.
        """
        super(InnerProductLayerNew, self).__init__()
        self.num_feature_field = num_feature_field
        self.device = device

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
dreaming-qin/RecBole
InnerProductLayer
false
12,312
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
ConvNCFBPRLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/k4/ck4thowjujufjoitsqemh6ciznb7idjuufiayjqrkybsqiz2p7z7.py # Topologically Sorted Source Nodes: [distance, neg, exp, add, log, loss], Original ATen: [aten.sub, aten.neg, aten.exp, aten.add, aten.log, aten.sum] # Source node to ATen node mapping: # add => add # distance => sub # exp => exp # log => log # loss => sum_1 # neg => neg # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%log,), kwargs = {}) triton_per_fused_add_exp_log_neg_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_log_neg_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_log_neg_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_exp_log_neg_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = -tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = 1.0 tmp6 = tmp4 + tmp5 tmp7 = tl_math.log(tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [distance, neg, exp, add, log, loss], Original ATen: [aten.sub, aten.neg, aten.exp, aten.add, aten.log, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_add_exp_log_neg_sub_sum_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ConvNCFBPRLoss(nn.Module): """ ConvNCFBPRLoss, based on Bayesian Personalized Ranking, Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = ConvNCFBPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self): super(ConvNCFBPRLoss, self).__init__() def forward(self, pos_score, neg_score): distance = pos_score - neg_score loss = torch.sum(torch.log(1 + torch.exp(-distance))) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
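Worth noting: torch.log(1 + torch.exp(-distance)) is softplus(-distance), and the exp overflows in float32 once -distance exceeds roughly 88 (the compiled kernel above reproduces the same naive form). A sketch of the identity and the stable alternative, illustrative only:

# Sketch: naive log(1 + exp(-d)) vs. the numerically stable F.softplus(-d).
import torch
import torch.nn.functional as F

d = torch.tensor([-100.0, 0.0, 100.0])
naive = torch.log(1 + torch.exp(-d))  # inf at d = -100 (exp overflow)
stable = F.softplus(-d)               # ~100.0, log(2), ~0.0
print(naive, stable)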
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_log_neg_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = -tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = 1.0 tmp6 = tmp4 + tmp5 tmp7 = tl_math.log(tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_exp_log_neg_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class ConvNCFBPRLossNew(nn.Module): """ ConvNCFBPRLoss, based on Bayesian Personalized Ranking, Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = ConvNCFBPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self): super(ConvNCFBPRLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
dreaming-qin/RecBole
ConvNCFBPRLoss
false
12,313
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
BaseFactorizationMachine
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/5v/c5vqsotjiyyydjenxs3ttprtusysjuigcmpcuavw4a5cbsh4movc.py # Topologically Sorted Source Nodes: [sum_1, square_of_sum, pow_2, sum_of_square, output, output_1, output_2], Original ATen: [aten.sum, aten.pow, aten.sub, aten.mul] # Source node to ATen node mapping: # output => sub # output_1 => sum_3 # output_2 => mul # pow_2 => pow_2 # square_of_sum => pow_1 # sum_1 => sum_1 # sum_of_square => sum_2 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, %sum_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [1], True), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 0.5), kwargs = {}) triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask) tmp33 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask) tmp34 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask) tmp36 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask) tmp38 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask) tmp50 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask) tmp51 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask) tmp53 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask) tmp55 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp0 * tmp0 tmp9 = tmp1 * tmp1 tmp10 = tmp8 + tmp9 tmp11 = tmp3 * tmp3 tmp12 = tmp10 + tmp11 tmp13 = tmp5 * tmp5 tmp14 = tmp12 + tmp13 tmp15 = tmp7 - tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp16 * tmp16 tmp25 = tmp17 * tmp17 tmp26 = tmp24 + tmp25 tmp27 = tmp19 * tmp19 tmp28 = tmp26 + tmp27 tmp29 = tmp21 * tmp21 tmp30 = tmp28 + tmp29 tmp31 = tmp23 - tmp30 tmp32 = tmp15 + tmp31 tmp35 = tmp33 + tmp34 tmp37 = tmp35 + tmp36 tmp39 = tmp37 + tmp38 tmp40 = tmp39 * tmp39 tmp41 = tmp33 * tmp33 tmp42 = tmp34 * tmp34 tmp43 = tmp41 + tmp42 tmp44 = tmp36 * tmp36 tmp45 = tmp43 + tmp44 tmp46 = tmp38 * tmp38 tmp47 = tmp45 + tmp46 tmp48 = tmp40 - tmp47 tmp49 = tmp32 + tmp48 tmp52 = tmp50 + tmp51 tmp54 = tmp52 + tmp53 tmp56 = tmp54 + tmp55 tmp57 = tmp56 * tmp56 tmp58 = tmp50 * tmp50 tmp59 = tmp51 * tmp51 tmp60 = tmp58 + tmp59 tmp61 = tmp53 * tmp53 tmp62 = tmp60 + tmp61 tmp63 = tmp55 * tmp55 tmp64 = tmp62 + tmp63 tmp65 = tmp57 - tmp64 tmp66 = tmp49 + tmp65 tmp67 = 0.5 tmp68 = tmp66 * tmp67 tl.store(in_out_ptr0 + (x2), tmp68, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [sum_1, square_of_sum, pow_2, sum_of_square, output, output_1, output_2], Original ATen: [aten.sum, aten.pow, aten.sub, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import 
rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class BaseFactorizationMachine(nn.Module):
    """Calculate the FM result over the embeddings.

    Args:
        reduce_sum: bool, whether to sum the result, default is True.

    Input:
        input_x: tensor, a 3D tensor with shape ``(batch_size, field_size, embed_dim)``.

    Output:
        output: tensor, a 2D tensor with shape ``(batch_size, 1)`` or ``(batch_size, embed_dim)``.
    """

    def __init__(self, reduce_sum=True):
        super(BaseFactorizationMachine, self).__init__()
        self.reduce_sum = reduce_sum

    def forward(self, input_x):
        square_of_sum = torch.sum(input_x, dim=1) ** 2
        sum_of_square = torch.sum(input_x ** 2, dim=1)
        output = square_of_sum - sum_of_square
        if self.reduce_sum:
            output = torch.sum(output, dim=1, keepdim=True)
        output = 0.5 * output
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
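The forward pass relies on the standard FM identity: for field vectors v_i, sum_{i<j} v_i ⊙ v_j = 0.5 * ((sum_i v_i)^2 - sum_i v_i^2), computed element-wise over the embedding dimension. A sketch checking it against an explicit pairwise loop (illustrative only):

# Sketch: verify the square-of-sum trick against a brute-force pairwise sum.
import torch

x = torch.rand(2, 4, 8)  # (batch_size, field_size, embed_dim)
fm = BaseFactorizationMachine(reduce_sum=True)(x)
pairwise = sum((x[:, i] * x[:, j]).sum(-1) for i in range(4) for j in range(i + 1, 4))
assert torch.allclose(fm.squeeze(-1), pairwise, atol=1e-5)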
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
    tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
    tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
    tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
    tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
    tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
    tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
    tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
    tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
    tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
    tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
    tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp0 * tmp0
    tmp9 = tmp1 * tmp1
    tmp10 = tmp8 + tmp9
    tmp11 = tmp3 * tmp3
    tmp12 = tmp10 + tmp11
    tmp13 = tmp5 * tmp5
    tmp14 = tmp12 + tmp13
    tmp15 = tmp7 - tmp14
    tmp18 = tmp16 + tmp17
    tmp20 = tmp18 + tmp19
    tmp22 = tmp20 + tmp21
    tmp23 = tmp22 * tmp22
    tmp24 = tmp16 * tmp16
    tmp25 = tmp17 * tmp17
    tmp26 = tmp24 + tmp25
    tmp27 = tmp19 * tmp19
    tmp28 = tmp26 + tmp27
    tmp29 = tmp21 * tmp21
    tmp30 = tmp28 + tmp29
    tmp31 = tmp23 - tmp30
    tmp32 = tmp15 + tmp31
    tmp35 = tmp33 + tmp34
    tmp37 = tmp35 + tmp36
    tmp39 = tmp37 + tmp38
    tmp40 = tmp39 * tmp39
    tmp41 = tmp33 * tmp33
    tmp42 = tmp34 * tmp34
    tmp43 = tmp41 + tmp42
    tmp44 = tmp36 * tmp36
    tmp45 = tmp43 + tmp44
    tmp46 = tmp38 * tmp38
    tmp47 = tmp45 + tmp46
    tmp48 = tmp40 - tmp47
    tmp49 = tmp32 + tmp48
    tmp52 = tmp50 + tmp51
    tmp54 = tmp52 + tmp53
    tmp56 = tmp54 + tmp55
    tmp57 = tmp56 * tmp56
    tmp58 = tmp50 * tmp50
    tmp59 = tmp51 * tmp51
    tmp60 = tmp58 + tmp59
    tmp61 = tmp53 * tmp53
    tmp62 = tmp60 + tmp61
    tmp63 = tmp55 * tmp55
    tmp64 = tmp62 + tmp63
    tmp65 = tmp57 - tmp64
    tmp66 = tmp49 + tmp65
    tmp67 = 0.5
    tmp68 = tmp66 * tmp67
    tl.store(in_out_ptr0 + x2, tmp68, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
    return buf1,


class BaseFactorizationMachineNew(nn.Module):
    """Calculate the FM result over the embeddings.

    Args:
        reduce_sum: bool, whether to sum the result, default is True.

    Input:
        input_x: tensor, a 3D tensor with shape ``(batch_size, field_size, embed_dim)``.

    Output:
        output: tensor, a 2D tensor with shape ``(batch_size, 1)`` or ``(batch_size, embed_dim)``.
    """

    def __init__(self, reduce_sum=True):
        super(BaseFactorizationMachineNew, self).__init__()
        self.reduce_sum = reduce_sum

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
dreaming-qin/RecBole
BaseFactorizationMachine
false
12,314
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
AGRUCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/by/cbypqeb7lfdlbth5j2ww7h2bluyiqc2nrbnc76btfynxntusq5wb.py # Topologically Sorted Source Nodes: [add, reset_gate, mul, add_1, new_state, sub, mul_1, mul_2, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub, aten.tanh_backward] # Source node to ATen node mapping: # add => add # add_1 => add_1 # hy => add_2 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # new_state => tanh # reset_gate => sigmoid # sub => sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %getitem_3), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem_5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, %mul), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_6), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %tanh), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_4), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask) tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (x2), xmask) tmp11 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp10 = tmp8 * tmp9 tmp13 = tmp11 + tmp12 tmp15 = tmp5 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp18 = tmp6 * tmp17 tmp19 = tmp10 + tmp18 tmp20 = tmp17 * tmp17 tmp21 = tmp7 - tmp20 tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp19, xmask) tl.store(out_ptr2 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [gh], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
# Topologically Sorted Source Nodes: [add, reset_gate, mul, add_1, new_state, sub, mul_1, mul_2, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub, aten.tanh_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0.run(buf0, primals_2, buf1, primals_7, primals_6, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0) del buf0 del primals_2 return (buf3, primals_3, primals_6, primals_7, reinterpret_tensor(buf1, (16, 4), (12, 1), 8), buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class AGRUCell(nn.Module):
    r"""Attention-based GRU (AGRU). AGRU uses the attention score to replace the
    update gate of GRU, and changes the hidden state directly.

    Formally:

    .. math:: {h}_{t}^{\prime}=\left(1-a_{t}\right) * {h}_{t-1}^{\prime}+a_{t} * \tilde{{h}}_{t}^{\prime}

    where :math:`{h}_{t}^{\prime}`, :math:`{h}_{t-1}^{\prime}` and
    :math:`\tilde{{h}}_{t}^{\prime}` are the hidden states of AGRU.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(AGRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size))
        if self.bias:
            self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
            self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)

    def forward(self, input, hidden_output, att_score):
        gi = F.linear(input, self.weight_ih, self.bias_ih)
        gh = F.linear(hidden_output, self.weight_hh, self.bias_hh)
        i_r, _i_u, i_h = gi.chunk(3, 1)
        h_r, _h_u, h_h = gh.chunk(3, 1)
        reset_gate = torch.sigmoid(i_r + h_r)
        new_state = torch.tanh(i_h + reset_gate * h_h)
        att_score = att_score.view(-1, 1)
        hy = (1 - att_score) * hidden_output + att_score * new_state
        return hy


def get_inputs():
    return [torch.rand([16, 4]), torch.rand([16, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
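A minimal sketch of one AGRU step with the shapes from get_inputs() (illustrative, not part of the record):

# Sketch: a single AGRUCell step; att_score is flattened to (-1, 1) internally.
import torch

cell = AGRUCell(input_size=4, hidden_size=4)
x = torch.rand(16, 4)    # current input
h = torch.rand(16, 4)    # previous hidden state
att = torch.rand(4, 4)   # 16 attention scores, one per row of the batch
hy = cell(x, h, att)
print(hy.shape)          # torch.Size([16, 4])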
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0(in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
    tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr4 + x2, xmask)
    tmp11 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
    tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)
    tmp7 = 1.0
    tmp8 = tmp7 - tmp6
    tmp10 = tmp8 * tmp9
    tmp13 = tmp11 + tmp12
    tmp15 = tmp5 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = libdevice.tanh(tmp16)
    tmp18 = tmp6 * tmp17
    tmp19 = tmp10 + tmp18
    tmp20 = tmp17 * tmp17
    tmp21 = tmp7 - tmp20
    tl.store(out_ptr0 + x2, tmp5, xmask)
    tl.store(out_ptr1 + x2, tmp19, xmask)
    tl.store(out_ptr2 + x2, tmp21, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (12, 4), (4, 1))
    assert_size_stride(primals_2, (12,), (1,))
    assert_size_stride(primals_3, (16, 4), (4, 1))
    assert_size_stride(primals_4, (12, 4), (4, 1))
    assert_size_stride(primals_5, (12,), (1,))
    assert_size_stride(primals_6, (16, 4), (4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12),
            (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
            primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0[grid(64)](
            buf0, primals_2, buf1, primals_7, primals_6, buf2, buf3, buf4,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf0
        del primals_2
    return buf3, primals_3, primals_6, primals_7, reinterpret_tensor(buf1,
        (16, 4), (12, 1), 8), buf2, buf4


class AGRUCellNew(nn.Module):
    r"""Attention-based GRU (AGRU). AGRU uses the attention score to replace the
    update gate of GRU, and changes the hidden state directly.

    Formally:

    .. math:: {h}_{t}^{\prime}=\left(1-a_{t}\right) * {h}_{t-1}^{\prime}+a_{t} * \tilde{{h}}_{t}^{\prime}

    where :math:`{h}_{t}^{\prime}`, :math:`{h}_{t-1}^{\prime}` and
    :math:`\tilde{{h}}_{t}^{\prime}` are the hidden states of AGRU.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(AGRUCellNew, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size))
        if self.bias:
            self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
            self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)

    def forward(self, input_0, input_1, input_2):
        primals_1 = self.weight_ih
        primals_4 = self.weight_hh
        primals_2 = self.bias_ih
        primals_5 = self.bias_hh
        primals_3 = input_0
        primals_6 = input_1
        primals_7 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
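A parity sketch for the compiled variant (assumes CUDA device 0; both modules draw random weights, so the state dict is copied first):

# Sketch: share parameters, then compare eager and compiled AGRU outputs.
import torch

if torch.cuda.is_available():
    eager = AGRUCell(4, 4).cuda()
    compiled = AGRUCellNew(4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())
    args = [torch.rand(16, 4, device='cuda:0'), torch.rand(16, 4,
        device='cuda:0'), torch.rand(4, 4, device='cuda:0')]
    assert torch.allclose(eager(*args), compiled(*args), atol=1e-5)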
dreaming-qin/RecBole
AGRUCell
false
12,315
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
RegLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3l/c3lzkatm5eqqfbbtcqxxw7oujhlvnqrxq6gcyetj3kemmpsltiiy.py # Topologically Sorted Source Nodes: [reg_loss, norm_1, reg_loss_1, norm_2, reg_loss_2, norm_3, reg_loss_3], Original ATen: [aten.linalg_vector_norm, aten.add] # Source node to ATen node mapping: # norm_1 => pow_3, pow_4, sum_2 # norm_2 => pow_5, pow_6, sum_3 # norm_3 => pow_7, pow_8, sum_4 # reg_loss => pow_1, pow_2, sum_1 # reg_loss_1 => add # reg_loss_2 => add_1 # reg_loss_3 => add_2 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, %pow_4), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_2, 2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, None), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %pow_6), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_3, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, None), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %pow_8), kwargs = {}) 
triton_per_fused_add_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr0 + (64 + r0), None) tmp10 = tl.load(in_ptr0 + (128 + r0), None) tmp15 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp20 = libdevice.sqrt(tmp4) tmp21 = libdevice.sqrt(tmp9) tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp14) tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp19) tmp26 = tmp24 + tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [reg_loss, norm_1, reg_loss_1, norm_2, reg_loss_2, norm_3, reg_loss_3], Original ATen: [aten.linalg_vector_norm, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_linalg_vector_norm_0.run(buf4, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from 
torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class RegLoss(nn.Module): """ RegLoss, L2 regularization on model parameters """ def __init__(self): super(RegLoss, self).__init__() def forward(self, parameters): reg_loss = None for W in parameters: if reg_loss is None: reg_loss = W.norm(2) else: reg_loss = reg_loss + W.norm(2) return reg_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
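An illustrative sketch of RegLoss in its typical role, summing L2 norms over a model's parameters (not part of the record):

# Sketch: L2 regularization term over an arbitrary module's parameters.
import torch
import torch.nn as nn

model = nn.Linear(8, 4)
reg = RegLoss()(model.parameters())
print(reg)  # scalar: ||weight||_2 + ||bias||_2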
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr0 + (64 + r0), None) tmp10 = tl.load(in_ptr0 + (128 + r0), None) tmp15 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp20 = libdevice.sqrt(tmp4) tmp21 = libdevice.sqrt(tmp9) tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp14) tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp19) tmp26 = tmp24 + tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_linalg_vector_norm_0[grid(1)](buf4, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf4, class RegLossNew(nn.Module): """ RegLoss, L2 regularization on model parameters """ def __init__(self): super(RegLossNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
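A parity sketch for the compiled variant (assumes CUDA device 0). Note that iterating the 4D input in the eager module yields its four dim-0 slices, which is exactly what the fused kernel norms:

# Sketch: eager RegLoss over dim-0 slices vs. the fused single-kernel version.
import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda:0')
    ref = RegLoss()(x)
    out = RegLossNew()(x)
    assert torch.allclose(ref, out, atol=1e-5)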
dreaming-qin/RecBole
RegLoss
false
12,316
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
AttLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/36/c36xldnuc3t465xlrbmm4yvftzdrqqjr5avgkhmu7tkzbkcsyqp3.py # Topologically Sorted Source Nodes: [att_signal_1, att_signal_2, att_signal_3], Original ATen: [aten.relu, aten.mul, aten.sum] # Source node to ATen node mapping: # att_signal_1 => relu # att_signal_2 => mul # att_signal_3 => sum_1 # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %primals_3), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) triton_poi_fused_mul_relu_sum_0 = async_compile.triton('triton_poi_fused_mul_relu_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_relu_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp13 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 * tmp3 tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = tmp6 * tmp3 tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp10 * tmp3 tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = tmp14 * tmp3 tmp16 = tmp12 + tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/dm/cdmkcxuzpnailvibeivaikqdr4zvashgzwju7qijhq5aizlo3aor.py # Topologically Sorted Source Nodes: [att_signal_4], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_signal_4 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sum_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') 
tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/kt/cktghousutx6xui2sl2rvevzmb7gkacvfhntjq5n2xzeu7v57oz6.py # Topologically Sorted Source Nodes: [att_signal_4], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_signal_4 => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_signal], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del 
primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_signal_1, att_signal_2, att_signal_3], Original ATen: [aten.relu, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_mul_relu_sum_0.run(buf0, primals_3, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_signal_4], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [att_signal_4], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) del buf2 return (buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as fn


class AttLayer(nn.Module):
    """Calculate the attention signal (weight) according to the input tensor.

    Args:
        infeatures (torch.FloatTensor): A 3D input tensor with shape of [batch_size, M, embed_dim].

    Returns:
        torch.FloatTensor: Attention weight of the input, with shape [batch_size, M].
    """

    def __init__(self, in_dim, att_dim):
        super(AttLayer, self).__init__()
        self.in_dim = in_dim
        self.att_dim = att_dim
        self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim,
            bias=False)
        self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True)

    def forward(self, infeatures):
        att_signal = self.w(infeatures)
        att_signal = fn.relu(att_signal)
        att_signal = torch.mul(att_signal, self.h)
        att_signal = torch.sum(att_signal, dim=2)
        att_signal = fn.softmax(att_signal, dim=1)
        return att_signal


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4, 'att_dim': 4}]
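A small sketch of the eager module's contract on the 3D input described in the docstring; the batch, M, and embedding sizes here are illustrative, not taken from this entry.

import torch

layer = AttLayer(in_dim=8, att_dim=6)
x = torch.rand(2, 5, 8)   # [batch_size, M, embed_dim]
att = layer(x)            # [batch_size, M]
print(att.shape)          # torch.Size([2, 5])
print(att.sum(dim=1))     # softmax over M, so each row sums to ~1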
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_relu_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp13 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 * tmp3 tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = tmp6 * tmp3 tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp10 * tmp3 tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = tmp14 * tmp3 tmp16 = tmp12 + tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_relu_sum_0[grid(64)](buf0, primals_3, buf1,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = buf1
        del buf1
        triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf2
    return buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf3


class AttLayerNew(nn.Module):
    """Calculate the attention signal (weight) according to the input tensor.

    Args:
        infeatures (torch.FloatTensor): A 3D input tensor with shape of [batch_size, M, embed_dim].

    Returns:
        torch.FloatTensor: Attention weight of the input, with shape [batch_size, M].
    """

    def __init__(self, in_dim, att_dim):
        super(AttLayerNew, self).__init__()
        self.in_dim = in_dim
        self.att_dim = att_dim
        self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim,
            bias=False)
        self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True)

    def forward(self, input_0):
        primals_3 = self.h
        primals_1 = self.w.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
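A sketch comparing the eager and compiled modules on the (4, 4, 4, 4) input shape that call() asserts; it assumes a CUDA device and that both classes are in scope, and copies the weights across so the two paths are comparable.

import torch

eager = AttLayer(in_dim=4, att_dim=4).cuda()
fused = AttLayerNew(in_dim=4, att_dim=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)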
dreaming-qin/RecBole
AttLayer
false
12,317
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
Repeat_Explore_Mechanism
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/lo/cloa3pjph6m2k24ygelmr3bqrhzqbnqcl7unftyarinyai76sqh6.py # Topologically Sorted Source Nodes: [last_memory_2, add, output_ere], Original ATen: [aten.repeat, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # last_memory_2 => repeat # output_ere => tanh # Graph fragment: # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 4, 1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %repeat), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_repeat_tanh_0 = async_compile.triton('triton_poi_fused_add_repeat_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_repeat_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_repeat_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py # Topologically Sorted Source Nodes: [alpha_are], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha_are => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [alpha_are], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha_are => div, sum_1 # Graph fragment: 
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/37/c37x3smg2vcipbhsjz546axeu2jge2hmqkrvktxkffkbchccjh4m.py # Topologically Sorted Source Nodes: [alpha_are_1, output_cre, output_cre_1], Original ATen: [aten.repeat, aten.mul, aten.sum] # Source node to ATen node mapping: # alpha_are_1 => repeat_1 # output_cre => mul # output_cre_1 => sum_2 # Graph fragment: # %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%div, [1, 1, 4]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%repeat_1, %primals_1), kwargs = {}) # %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_repeat_sum_3 = async_compile.triton('triton_poi_fused_mul_repeat_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_repeat_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_repeat_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask) tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/m4/cm4zqjee2djtqrpv2iq3k3x5qsq6cqxcvagimjcpqnohz4rdhbaa.py # Topologically Sorted Source Nodes: [repeat_explore_mechanism], Original ATen: [aten._softmax] # Source node to ATen node mapping: # repeat_explore_mechanism => amax_1, div_1, exp_1, sub_1, sum_3 # Graph fragment: # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mm_3, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mm_3, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_3), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 2) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (2, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [last_memory_2, add, output_ere], Original ATen: [aten.repeat, aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_repeat_tanh_0.run(buf2, buf1, 64, grid=grid(64), stream=stream0) buf3 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [output_ere_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [alpha_are], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [alpha_are], Original ATen: [aten._softmax] 
triton_poi_fused__softmax_2.run(buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [alpha_are_1, output_cre, output_cre_1], Original ATen: [aten.repeat, aten.mul, aten.sum] triton_poi_fused_mul_repeat_sum_3.run(buf5, primals_1, buf6, 16, grid=grid(16), stream=stream0) buf7 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [output_cre_2], Original ATen: [aten.mm] extern_kernels.mm(buf6, reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [repeat_explore_mechanism], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf7, buf8, 8, grid=grid(8), stream=stream0) del buf7 return (buf8, primals_1, primals_4, buf2, buf5, buf6, buf8, primals_6, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Repeat_Explore_Mechanism(nn.Module):

    def __init__(self, device, hidden_size, seq_len, dropout_prob):
        super(Repeat_Explore_Mechanism, self).__init__()
        self.dropout = nn.Dropout(dropout_prob)
        self.hidden_size = hidden_size
        self.device = device
        self.seq_len = seq_len
        self.Wre = nn.Linear(hidden_size, hidden_size, bias=False)
        self.Ure = nn.Linear(hidden_size, hidden_size, bias=False)
        self.tanh = nn.Tanh()
        self.Vre = nn.Linear(hidden_size, 1, bias=False)
        self.Wcre = nn.Linear(hidden_size, 2, bias=False)

    def forward(self, all_memory, last_memory):
        """Calculate the probabilities of the repeat and explore modes."""
        all_memory_values = all_memory
        all_memory = self.dropout(self.Ure(all_memory))
        last_memory = self.dropout(self.Wre(last_memory))
        last_memory = last_memory.unsqueeze(1)
        last_memory = last_memory.repeat(1, self.seq_len, 1)
        output_ere = self.tanh(all_memory + last_memory)
        output_ere = self.Vre(output_ere)
        alpha_are = nn.Softmax(dim=1)(output_ere)
        alpha_are = alpha_are.repeat(1, 1, self.hidden_size)
        output_cre = alpha_are * all_memory_values
        output_cre = output_cre.sum(dim=1)
        output_cre = self.Wcre(output_cre)
        repeat_explore_mechanism = nn.Softmax(dim=-1)(output_cre)
        return repeat_explore_mechanism


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'device': 0, 'hidden_size': 4, 'seq_len': 4,
        'dropout_prob': 0.5}]
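A sketch of how the mechanism is driven; .eval() turns the dropout into an identity so the run is deterministic. All sizes are illustrative.

import torch

mech = Repeat_Explore_Mechanism(device='cpu', hidden_size=4, seq_len=4,
    dropout_prob=0.5).eval()
all_memory = torch.rand(3, 4, 4)   # [batch, seq_len, hidden_size] GRU outputs
last_memory = torch.rand(3, 4)     # [batch, hidden_size] final hidden state
p = mech(all_memory, last_memory)  # [batch, 2]: P(repeat), P(explore)
print(p.shape, p.sum(dim=-1))      # rows sum to ~1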
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_repeat_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_repeat_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask) tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: 
tl.constexpr ): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (2, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_repeat_tanh_0[grid(64)](buf2, buf1, 64, XBLOCK =64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0) del buf1 extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0) del buf3 triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused_mul_repeat_sum_3[grid(16)](buf5, primals_1, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_6, (4, 2), (1, 4 ), 0), out=buf7) buf8 = empty_strided_cuda((4, 2), (2, 1), torch.float32) triton_poi_fused__softmax_4[grid(8)](buf7, buf8, 8, XBLOCK=8, num_warps=1, num_stages=1) del buf7 return (buf8, primals_1, primals_4, buf2, buf5, buf6, buf8, primals_6, primals_5) class Repeat_Explore_MechanismNew(nn.Module): def __init__(self, device, hidden_size, seq_len, dropout_prob): super(Repeat_Explore_MechanismNew, self).__init__() self.dropout = nn.Dropout(dropout_prob) self.hidden_size = hidden_size self.device = device self.seq_len = seq_len self.Wre = nn.Linear(hidden_size, hidden_size, bias=False) self.Ure = nn.Linear(hidden_size, hidden_size, bias=False) self.tanh = nn.Tanh() self.Vre = nn.Linear(hidden_size, 1, bias=False) self.Wcre = nn.Linear(hidden_size, 2, bias=False) def forward(self, input_0, input_1): primals_2 = self.Wre.weight primals_3 = self.Ure.weight primals_5 = self.Vre.weight primals_6 = self.Wcre.weight primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
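The compiled wrapper asserts the exact (4, 4, 4) and (4, 4) input shapes from get_inputs(), so a cross-check has to use those. This sketch assumes a CUDA device, copies the eager weights into the compiled module, and uses .eval() so dropout is inactive on both paths (the traced graph contains no dropout).

import torch

eager = Repeat_Explore_Mechanism(0, 4, 4, 0.5).cuda().eval()
fused = Repeat_Explore_MechanismNew(0, 4, 4, 0.5).cuda().eval()
fused.load_state_dict(eager.state_dict())
all_memory = torch.rand(4, 4, 4, device='cuda')
last_memory = torch.rand(4, 4, device='cuda')
assert torch.allclose(eager(all_memory, last_memory),
    fused(all_memory, last_memory), atol=1e-5)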
dreaming-qin/RecBole
Repeat_Explore_Mechanism
false
12,318
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
ItemToInterestAggregation
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/7t/c7tv26qinahwg4x6y7nukbjorerecrvfqh4lj443n2izpiaw4vka.py # Topologically Sorted Source Nodes: [D_matrix_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # D_matrix_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [-2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x3 = xindex x0 = xindex % 5 x2 = (xindex // 20) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (20*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (5 + x0 + (20*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (10 + x0 + (20*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (15 + x0 + (20*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/p2/cp27pmszy6iifh6ubxqwv5ytt4bqdsag6l4catb7gbvvgqucbbxm.py # Topologically Sorted Source Nodes: [D_matrix_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # D_matrix_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-2], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 5 x2 = (xindex // 20) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (20*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (5 + x0 + (20*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (10 + x0 + (20*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (15 + x0 + (20*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 5), (5, 1)) 
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [D_matrix], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [D_matrix_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 80, grid=grid(80), stream=stream0) buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [D_matrix_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 80, grid=grid(80), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), buf2, out=buf3) del buf2 return (reinterpret_tensor(buf3, (4, 5, 4), (20, 1, 5), 0), primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 5), (5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ItemToInterestAggregation(nn.Module): def __init__(self, seq_len, hidden_size, k_interests=5): super().__init__() self.k_interests = k_interests self.theta = nn.Parameter(torch.randn([hidden_size, k_interests])) def forward(self, input_tensor): D_matrix = torch.matmul(input_tensor, self.theta) D_matrix = nn.Softmax(dim=-2)(D_matrix) result = torch.einsum('nij, nik -> nkj', input_tensor, D_matrix) return result def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'seq_len': 4, 'hidden_size': 4}]
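The einsum 'nij, nik -> nkj' is a batched D^T @ X: for every interest k, the softmax column D[:, :, k] mixes the item embeddings along the sequence axis. A sketch checking that reading, with illustrative sizes:

import torch

agg = ItemToInterestAggregation(seq_len=7, hidden_size=4, k_interests=5)
x = torch.rand(2, 7, 4)                   # [batch, seq_len, hidden_size]
out = agg(x)                              # [batch, k_interests, hidden_size]
D = torch.softmax(x @ agg.theta, dim=-2)  # [batch, seq_len, k_interests]
assert torch.allclose(out, D.transpose(1, 2) @ x, atol=1e-5)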
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 5 x2 = xindex // 20 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (5 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (10 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (15 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 5 x2 = xindex // 20 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (5 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (10 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (15 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 5), (5, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(80)](buf0, buf1, 80, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) triton_poi_fused__softmax_1[grid(80)](buf1, buf2, 80, XBLOCK=128, num_warps=4, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), buf2, out=buf3) del buf2 return reinterpret_tensor(buf3, (4, 5, 4), (20, 1, 5), 0), primals_2, buf0 class ItemToInterestAggregationNew(nn.Module): def __init__(self, seq_len, hidden_size, k_interests=5): super().__init__() self.k_interests = k_interests self.theta = nn.Parameter(torch.randn([hidden_size, k_interests])) def forward(self, input_0): primals_1 = self.theta primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
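A sketch comparing the eager einsum path with the compiled mm/bmm path on the (4, 4, 4) shape that call() asserts; it assumes a CUDA device and shares theta between the two modules so the outputs line up.

import torch

eager = ItemToInterestAggregation(seq_len=4, hidden_size=4, k_interests=5).cuda()
fused = ItemToInterestAggregationNew(seq_len=4, hidden_size=4, k_interests=5).cuda()
fused.theta = eager.theta  # share the single parameter
x = torch.rand(4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)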
dreaming-qin/RecBole
ItemToInterestAggregation
false
12,319
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
AUGRUCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/su/csuyns6mfp66oncomzo5eikxszcgvnk2yq5hnh5qodgq2yxjppwg.py # Topologically Sorted Source Nodes: [add, reset_gate, add_1, update_gate, mul, add_2, new_state, update_gate_1, sub, mul_2, mul_3, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # hy => add_3 # mul => mul # mul_2 => mul_2 # mul_3 => mul_3 # new_state => tanh # reset_gate => sigmoid # sub => sub # update_gate => sigmoid_1 # update_gate_1 => mul_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %getitem_3), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_1, %getitem_4), kwargs = {}) # %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, %mul), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %sigmoid_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_6), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %tanh), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers 
import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4 + x0 + (12*x1)), xmask) tmp6 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask) tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask) tmp12 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask) tmp19 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr4 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = tl.sigmoid(tmp10) tmp14 = tmp12 + tmp13 tmp16 = tmp11 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = libdevice.tanh(tmp17) tmp20 = tmp19 * tmp5 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tmp24 = tmp22 * tmp23 tmp25 = tmp20 * tmp18 tmp26 = tmp24 + tmp25 tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp11, xmask) tl.store(out_ptr2 + (x2), tmp18, xmask) tl.store(out_ptr3 + (x2), tmp26, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (64, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (64, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 
buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [gh], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, reset_gate, add_1, update_gate, mul, add_2, new_state, update_gate_1, sub, mul_2, mul_3, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0.run(buf0, primals_2, buf1, primals_7, primals_6, buf3, buf2, buf4, buf5, 256, grid=grid(256), stream=stream0) del buf0 del primals_2 return (buf5, primals_3, primals_6, primals_7, reinterpret_tensor(buf1, (64, 4), (12, 1), 8), buf2, buf3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class AUGRUCell(nn.Module):
    r"""GRU cell with an attentional update gate (AUGRU). AUGRU combines the
    attention mechanism and GRU seamlessly.

    Formally:

    .. math::
        \tilde{u}_t' = a_t \cdot u_t'

        h_t' = (1 - \tilde{u}_t') \circ h_{t-1}' + \tilde{u}_t' \circ \tilde{h}_t'
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(AUGRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # Stacked weights for the reset, update, and candidate gates.
        self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size))
        if bias:
            self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
            self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)

    def forward(self, input, hidden_output, att_score):
        gi = F.linear(input, self.weight_ih, self.bias_ih)
        gh = F.linear(hidden_output, self.weight_hh, self.bias_hh)
        i_r, i_u, i_h = gi.chunk(3, 1)
        h_r, h_u, h_h = gh.chunk(3, 1)
        reset_gate = torch.sigmoid(i_r + h_r)
        update_gate = torch.sigmoid(i_u + h_u)
        new_state = torch.tanh(i_h + reset_gate * h_h)
        # Scale the update gate by the attention score (the "A" in AUGRU).
        att_score = att_score.view(-1, 1)
        update_gate = att_score * update_gate
        hy = (1 - update_gate) * hidden_output + update_gate * new_state
        return hy


def get_inputs():
    return [torch.rand([64, 4]), torch.rand([64, 4]), torch.rand([16, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
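As a quick illustration of the cell's contract (one time step per call, with an attention scalar per example), here is a minimal sketch; the sizes are illustrative rather than taken from the tests above.

import torch

cell = AUGRUCell(input_size=4, hidden_size=4)
x = torch.rand(64, 4)   # input at time t
h = torch.rand(64, 4)   # hidden state from time t-1
a = torch.rand(64, 1)   # attention score a_t for each example
h_next = cell(x, h, a)  # shape (64, 4)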
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask) tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask) tmp12 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask) tmp19 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = tl.sigmoid(tmp10) tmp14 = tmp12 + tmp13 tmp16 = tmp11 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = libdevice.tanh(tmp17) tmp20 = tmp19 * tmp5 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tmp24 = tmp22 * tmp23 tmp25 = tmp20 * tmp18 tmp26 = tmp24 + tmp25 tl.store(out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr2 + x2, tmp18, xmask) tl.store(out_ptr3 + x2, tmp26, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (64, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (64, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor( primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0, primals_2, buf1, primals_7, primals_6, buf3, buf2, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return buf5, primals_3, primals_6, primals_7, reinterpret_tensor(buf1, (64, 4), (12, 1), 8), buf2, buf3, buf4 class AUGRUCellNew(nn.Module): ' Effect of GRU with attentional update gate (AUGRU). 
AUGRU combines the attention mechanism and GRU seamlessly.\n\n Formally:\n .. math:: \\tilde{u}_t^{\\prime} = a_t \\cdot u_t^{\\prime}, \\quad h_t^{\\prime} = (1 - \\tilde{u}_t^{\\prime}) \\circ h_{t-1}^{\\prime} + \\tilde{u}_t^{\\prime} \\circ \\tilde{h}_t^{\\prime}\n\n    '

    def __init__(self, input_size, hidden_size, bias=True):
        super(AUGRUCellNew, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size,
            hidden_size))
        if bias:
            self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
            self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)

    def forward(self, input_0, input_1, input_2):
        primals_1 = self.weight_ih
        primals_4 = self.weight_hh
        primals_2 = self.bias_ih
        primals_5 = self.bias_hh
        primals_3 = input_0
        primals_6 = input_1
        primals_7 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
dreaming-qin/RecBole
AUGRUCell
false
12,320
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
ComplexLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/k7/ck7eabcc7dltczcwxevz2ehp36wjm7bizks5htevqjcppd66dcya.py # Topologically Sorted Source Nodes: [sub, add], Original ATen: [aten.sub, aten.add] # Source node to ATen node mapping: # add => add # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %view_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_7), kwargs = {}) triton_poi_fused_add_sub_0 = async_compile.triton('triton_poi_fused_add_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] 
xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_out_ptr1 + (x2), xmask) tmp9 = tl.load(in_ptr3 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 - tmp5 tmp8 = tmp7 + tmp1 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tl.store(in_out_ptr0 + (x2), tmp6, xmask) tl.store(in_out_ptr1 + (x2), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf3) del primals_1 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4) del primals_4 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [sub, add], Original ATen: [aten.sub, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_sub_0.run(buf2, buf5, primals_2, buf1, primals_5, buf4, 256, grid=grid(256), stream=stream0) del buf1 del buf4 del primals_2 del primals_5 return (buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module
import torch
from torch.nn import Linear


class ComplexLinear(Module):
    """Complex-valued linear layer built from two real Linear layers.

    For a complex input x = x_r + i*x_i and complex weight W = W_r + i*W_i,
    the product W x has real part W_r x_r - W_i x_i and imaginary part
    W_r x_i + W_i x_r; forward() computes these with the two real layers,
    whose biases combine into an effective complex bias.
    """

    def __init__(self, in_features, out_features):
        super(ComplexLinear, self).__init__()
        self.fc_r = Linear(in_features, out_features)  # real part of the weight
        self.fc_i = Linear(in_features, out_features)  # imaginary part of the weight

    def forward(self, input_r, input_i):
        return self.fc_r(input_r) - self.fc_i(input_i), self.fc_r(input_i
            ) + self.fc_i(input_r)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
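A minimal sketch checking the layer against the explicit complex-multiplication formula; note the two real biases fold into an effective complex bias, so the reference carries them explicitly. Sizes are illustrative.

import torch

layer = ComplexLinear(in_features=4, out_features=4)
xr, xi = torch.rand(2, 4), torch.rand(2, 4)
out_r, out_i = layer(xr, xi)

Wr, Wi = layer.fc_r.weight, layer.fc_i.weight
br, bi = layer.fc_r.bias, layer.fc_i.bias
ref_r = xr @ Wr.T - xi @ Wi.T + br - bi   # real part, biases included
ref_i = xi @ Wr.T + xr @ Wi.T + br + bi   # imaginary part, biases included
print(torch.allclose(out_r, ref_r, atol=1e-5),
      torch.allclose(out_i, ref_i, atol=1e-5))  # expect True True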
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch.nn import Linear assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_out_ptr1 + x2, xmask) tmp9 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 - tmp5 tmp8 = tmp7 + tmp1 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tl.store(in_out_ptr0 + x2, tmp6, xmask) tl.store(in_out_ptr1 + x2, tmp11, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf3) del primals_1 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4) del primals_4 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 get_raw_stream(0) triton_poi_fused_add_sub_0[grid(256)](buf2, buf5, primals_2, buf1, primals_5, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf4 del primals_2 del primals_5 return buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0) class ComplexLinearNew(Module): def __init__(self, in_features, out_features): super(ComplexLinearNew, self).__init__() self.fc_r = Linear(in_features, out_features) self.fc_i = Linear(in_features, out_features) def forward(self, input_0, input_1): primals_1 = self.fc_r.weight primals_2 = self.fc_r.bias primals_4 = self.fc_i.weight primals_5 = self.fc_i.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
drydenwiebe/complexPyTorch
ComplexLinear
false
12,321
[ "MIT" ]
0
cea88ba7ee5692dfa1b40f0ba609ef14160d5073
https://github.com/drydenwiebe/complexPyTorch/tree/cea88ba7ee5692dfa1b40f0ba609ef14160d5073
BinaryClassificationHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_3 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: 
[x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_5 return (reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import torch


class BinaryClassificationHead(torch.nn.Module):
    """Classification head that scores the first-position (CLS-style)
    features with dropout -> dense -> tanh -> dropout -> single-logit
    projection."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = torch.nn.Linear(config.hidden_size, 1)

    def init_weights(self):
        self.dense.weight.data.normal_(mean=0.0, std=self.config.
            initializer_range)
        if self.dense.bias is not None:
            self.dense.bias.data.zero_()

    def forward(self, features, **kwargs):
        x = features[:, 0, :]  # take the first position along dim 1
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=
        0.5)}]
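A minimal eager sketch of the head; the mock config mirrors get_init_inputs() above, and eval() is used so dropout is inactive and the pass is deterministic.

import torch

config = _mock_config(hidden_size=4, hidden_dropout_prob=0.5)
head = BinaryClassificationHead(config)
head.eval()  # disable dropout
logits = head(torch.rand(4, 4, 4, 4))
print(logits.shape)  # torch.Size([4, 4, 1])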
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_5 return reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4 class BinaryClassificationHeadNew(torch.nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size) self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) self.out_proj = torch.nn.Linear(config.hidden_size, 1) def init_weights(self): self.dense.weight.data.normal_(mean=0.0, std=self.config. initializer_range) if self.dense.bias is not None: self.dense.bias.data.zero_() def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
BunnyNoBugs/DeepPavlov
BinaryClassificationHead
false
12,322
[ "Apache-2.0" ]
0
b2213db633a669d27d6f745dd780530574ccf8b5
https://github.com/BunnyNoBugs/DeepPavlov/tree/b2213db633a669d27d6f745dd780530574ccf8b5