Dataset schema (column, dtype, observed size range):

    entry_point            string    length 1-65
    original_triton_code   string    length 4.5k-619k
    python_code            string    length 208-60.9k
    triton_code            string    length 1.15k-275k
    repo_name              string    length 7-115
    module_name            string    length 1-65
    synthetic              bool      1 class
    uuid                   int64     0-18.5k
    licenses               list      length 1-6
    stars                  int64     0-19.8k
    sha                    string    length 40-40
    repo_link              string    length 72-180
BasicModel_ConvNet_MaxPool1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ni/cnii7nxq4niiy4lr34yc7mgkobmblidfnkte54pcucvd7ervvub2.py # Topologically Sorted Source Nodes: [conv1d, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv1d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 496 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 62) % 2 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/du/cduq3gwruj3pcttaz7jgcybdqfx6yypblacfgntceuh2rbrk5zms.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets, getitem_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 2], [1, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 248 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x0), tmp5, xmask) tl.store(out_ptr1 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/em/cemjfxr474cvy7lvbjzltbj2bxrokqif7efsbkfc6jqj7jow4qcg.py # Topologically Sorted Source Nodes: [conv1d_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv1d_1 => convolution_1 # x_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%squeeze, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 464 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 29) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ac/cacoonmn27elu6ggvbgu4am6b355mzkwqz42prnxsccvs3o2qk62.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_3 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze_1, [1, 2], [1, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = 
(%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 224 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = (xindex // 14) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (29*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (29*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vw/cvwy5jmi63rkvmren5xbssec6wzlmj32pn6yk5k3v2skpobh3jvb.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_5 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 448 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7g/c7gfqptsp3sddqxuwnx67i5ihjsfzdwav52gbj2otvahhhrmoacr.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_5 = async_compile.triton('triton_per_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} 
) @triton.jit def triton_per_fused__softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 56 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (10*x0)), tmp11, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (2, 1, 3), (3, 3, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (4, 1, 64), (64, 64, 1)) assert_size_stride(primals_4, (4, 2, 3), (6, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (8, 4), (4, 1)) assert_size_stride(primals_7, (8, ), (1, )) assert_size_stride(primals_8, (10, 8), (8, 1)) assert_size_stride(primals_9, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 62), (124, 62, 1)) buf1 = buf0; del buf0 # reuse buf15 = empty_strided_cuda((4, 2, 62), (124, 62, 1), torch.bool) # Topologically Sorted Source Nodes: [conv1d, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf15, 496, grid=grid(496), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 2, 1, 31), (62, 31, 31, 1), torch.int8) buf3 = empty_strided_cuda((4, 2, 1, 31), (62, 31, 31, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 248, grid=grid(248), stream=stream0) # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (4, 2, 31), (62, 31, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 29), (116, 29, 1)) buf5 = buf4; del buf4 # reuse buf14 = empty_strided_cuda((4, 4, 29), (116, 29, 1), torch.bool) # Topologically Sorted Source Nodes: [conv1d_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf5, primals_5, buf14, 464, grid=grid(464), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 4, 1, 14), (56, 14, 14, 1), torch.int8) buf7 = empty_strided_cuda((4, 4, 1, 14), (56, 14, 14, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 224, grid=grid(224), stream=stream0) buf8 = 
empty_strided_cuda((56, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (56, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 448, grid=grid(448), stream=stream0) del primals_7 buf10 = empty_strided_cuda((56, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (8, 10), (1, 8), 0), alpha=1, beta=1, out=buf10) del primals_9 buf13 = empty_strided_cuda((56, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_5.run(buf10, buf13, 56, 10, grid=grid(56), stream=stream0) del buf10 return (buf13, primals_1, primals_3, primals_4, reinterpret_tensor(buf1, (4, 2, 1, 62), (124, 62, 62, 1), 0), buf2, reinterpret_tensor(buf3, (4, 2, 31), (62, 31, 1), 0), reinterpret_tensor(buf5, (4, 4, 1, 29), (116, 29, 29, 1), 0), buf6, reinterpret_tensor(buf7, (56, 4), (4, 1), 0), buf9, buf13, primals_8, primals_6, buf14, buf15, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64), (64, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 2, 3), (6, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((10, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import Tensor
import torch.nn as nn
from typing import no_type_check


class BasicModel_ConvNet_MaxPool1d(nn.Module):
    """Same as above, but with the MaxPool2d replaced
    with a MaxPool1d. This is useful because the MaxPool modules behave
    differently to other modules from the perspective of the DeepLift
    Attributions
    """

    def __init__(self) -> None:
        super().__init__()
        self.conv1 = nn.Conv1d(1, 2, 3)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool1d(2)
        self.conv2 = nn.Conv1d(2, 4, 3)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool1d(2)
        self.fc1 = nn.Linear(4, 8)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(8, 10)
        self.softmax = nn.Softmax(dim=1)
        self.fc1.weight = nn.Parameter(torch.ones(8, 4))
        self.fc2.weight = nn.Parameter(torch.ones(10, 8))

    @no_type_check
    def forward(self, x: Tensor) -> Tensor:
        x = self.relu1(self.conv1(x))
        x = self.pool1(x)
        x = self.relu2(self.conv2(x))
        x = self.pool2(x)
        x = x.view(-1, 4)
        x = self.relu3(self.fc1(x))
        x = self.fc2(x)
        return self.softmax(x)


def get_inputs():
    return [torch.rand([4, 1, 64])]


def get_init_inputs():
    return [[], {}]
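A quick shape walk-through (an illustrative sketch, not part of the dataset row) explains the hard-coded element counts in this row's Triton kernels: 496, 248, 464, 224, 448, and the 56-row softmax all fall out of propagating the (4, 1, 64) input through the model above.

import torch
import torch.nn as nn

x = torch.rand(4, 1, 64)
x = nn.Conv1d(1, 2, 3)(x)   # (4, 2, 62): 4*2*62 = 496 elements (kernel 0)
x = nn.MaxPool1d(2)(x)      # (4, 2, 31): 4*2*31 = 248 (kernel 1)
x = nn.Conv1d(2, 4, 3)(x)   # (4, 4, 29): 4*4*29 = 464 (kernel 2)
x = nn.MaxPool1d(2)(x)      # (4, 4, 14): 4*4*14 = 224 (kernel 3)
x = x.view(-1, 4)           # (56, 4): the 224 elements regrouped into 56 rows
x = nn.Linear(4, 8)(x)      # (56, 8): 448 elements (kernel 4)
x = nn.Linear(8, 10)(x)     # (56, 10): the softmax kernel reduces 56 rows of 10
print(x.shape)              # torch.Size([56, 10])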
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 496 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 62 % 2 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 248 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr1 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 464 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 29 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 224 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = xindex // 14 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 29 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 29 * x1), xmask, eviction_policy ='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr1 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 448 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) 
@triton.jit def triton_per_fused__softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 56 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (2, 1, 3), (3, 3, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 1, 64), (64, 64, 1)) assert_size_stride(primals_4, (4, 2, 3), (6, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (8, 4), (4, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (10, 8), (8, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 62), (124, 62, 1)) buf1 = buf0 del buf0 buf15 = empty_strided_cuda((4, 2, 62), (124, 62, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(496)](buf1, primals_2, buf15, 496, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 2, 1, 31), (62, 31, 31, 1), torch.int8) buf3 = empty_strided_cuda((4, 2, 1, 31), (62, 31, 31, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_1[grid(248)](buf1, buf2, buf3, 248, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (4, 2, 31), (62, 31, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 29), (116, 29, 1)) buf5 = buf4 del buf4 buf14 = empty_strided_cuda((4, 4, 29), (116, 29, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(464)](buf5, primals_5, buf14, 464, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4, 1, 14), (56, 14, 14, 1), torch.int8) buf7 = empty_strided_cuda((4, 4, 1, 14), (56, 14, 14, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(224)](buf5, buf6, buf7, 224, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((56, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (56, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(448)](buf9, primals_7, 448, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((56, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (8, 10), (1, 8), 0), alpha=1, beta=1, out=buf10) del primals_9 buf13 = empty_strided_cuda((56, 10), (10, 1), torch.float32) triton_per_fused__softmax_5[grid(56)](buf10, buf13, 56, 10, XBLOCK= 1, num_warps=2, num_stages=1) del 
buf10 return buf13, primals_1, primals_3, primals_4, reinterpret_tensor(buf1, (4, 2, 1, 62), (124, 62, 62, 1), 0), buf2, reinterpret_tensor(buf3, (4, 2, 31), (62, 31, 1), 0), reinterpret_tensor(buf5, (4, 4, 1, 29), (116, 29, 29, 1), 0), buf6, reinterpret_tensor(buf7, (56, 4), (4, 1), 0 ), buf9, buf13, primals_8, primals_6, buf14, buf15 class BasicModel_ConvNet_MaxPool1dNew(nn.Module): """Same as above, but with the MaxPool2d replaced with a MaxPool1d. This is useful because the MaxPool modules behave differently to other modules from the perspective of the DeepLift Attributions """ def __init__(self) ->None: super().__init__() self.conv1 = nn.Conv1d(1, 2, 3) self.relu1 = nn.ReLU() self.pool1 = nn.MaxPool1d(2) self.conv2 = nn.Conv1d(2, 4, 3) self.relu2 = nn.ReLU() self.pool2 = nn.MaxPool1d(2) self.fc1 = nn.Linear(4, 8) self.relu3 = nn.ReLU() self.fc2 = nn.Linear(8, 10) self.softmax = nn.Softmax(dim=1) self.fc1.weight = nn.Parameter(torch.ones(8, 4)) self.fc2.weight = nn.Parameter(torch.ones(10, 8)) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
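As a hedged usage sketch (assuming a CUDA device and that both classes above are in scope), the Inductor-generated BasicModel_ConvNet_MaxPool1dNew should reproduce the eager model once the weights are shared:

import torch

eager = BasicModel_ConvNet_MaxPool1d().cuda()
compiled = BasicModel_ConvNet_MaxPool1dNew().cuda()
compiled.load_state_dict(eager.state_dict())  # identical submodule layout

x = torch.rand(4, 1, 64, device='cuda')
torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)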
YNNEKUW/captum
BasicModel_ConvNet_MaxPool1d
false
12,015
[ "BSD-3-Clause" ]
0
c8b5357b21f2ddf440e5f0ce25635977292aa5d1
https://github.com/YNNEKUW/captum/tree/c8b5357b21f2ddf440e5f0ce25635977292aa5d1
BasicModel3
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ur/curqs5ijaywea7eln4nsct7fk2aktgmcp3hydopsadkd44czucp4.py # Topologically Sorted Source Nodes: [sub, relu_out1, relu_out2, sub_1, relu_2], Original ATen: [aten.sub, aten.relu] # Source node to ATen node mapping: # relu_2 => relu_2 # relu_out1 => relu # relu_out2 => relu_1 # sub => sub # sub_1 => sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {}) # %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg1_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %relu_1), kwargs = {}) # %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {}) triton_poi_fused_relu_sub_0 = async_compile.triton('triton_poi_fused_relu_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp5 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp7 = tmp4 - tmp6 tmp8 = triton_helpers.maximum(tmp3, tmp7) tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, relu_out1, relu_out2, sub_1, relu_2], Original ATen: [aten.sub, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicModel3(nn.Module):
    """
    Example model two from the paper
    https://arxiv.org/pdf/1703.01365.pdf

    f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2))
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input1, input2):
        relu_out1 = F.relu(input1 - 1)
        relu_out2 = F.relu(input2)
        return F.relu(relu_out1 - relu_out2)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
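A tiny worked example (illustrative, assuming the BasicModel3 class above is in scope) makes the formula concrete:

import torch

m = BasicModel3()
x1 = torch.tensor([[2.5]])  # ReLU(2.5 - 1) = 1.5
x2 = torch.tensor([[0.5]])  # ReLU(0.5) = 0.5
print(m(x1, x2))            # ReLU(1.5 - 0.5) -> tensor([[1.]])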
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp5 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp3, tmp5)
    tmp7 = tmp4 - tmp6
    tmp8 = triton_helpers.maximum(tmp3, tmp7)
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_relu_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class BasicModel3New(nn.Module):
    """
    Example model two from the paper
    https://arxiv.org/pdf/1703.01365.pdf

    f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2))
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
YNNEKUW/captum
BasicModel3
false
12,016
[ "BSD-3-Clause" ]
0
c8b5357b21f2ddf440e5f0ce25635977292aa5d1
https://github.com/YNNEKUW/captum/tree/c8b5357b21f2ddf440e5f0ce25635977292aa5d1
make_style
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py # Topologically Sorted Source Nodes: [style], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # style => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x0), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/2d/c2dxxe3mbggak6huy3dpyor6kge4bsndzz6nxnksovqavwda6k3i.py # Topologically Sorted Source Nodes: [pow_1, sum_1, pow_2, style_2], Original ATen: [aten.pow, aten.sum, aten.div] # Source node to ATen node mapping: # pow_1 => pow_1 # pow_2 => pow_2 # style_2 => div # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, %pow_2), kwargs = {}) triton_poi_fused_div_pow_sum_1 = async_compile.triton('triton_poi_fused_div_pow_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_pow_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_pow_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [style], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, pow_2, style_2], Original ATen: [aten.pow, aten.sum, aten.div] triton_poi_fused_div_pow_sum_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class make_style(nn.Module):

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()

    def forward(self, x0):
        style = F.avg_pool2d(x0, kernel_size=(x0.shape[-2], x0.shape[-1]))
        style = self.flatten(style)
        style = style / torch.sum(style ** 2, axis=1, keepdim=True) ** 0.5
        return style


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
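A short sanity check (illustrative sketch, assuming the make_style class above is in scope): the module averages each channel over the full spatial extent and then L2-normalizes per sample, so every output row has unit norm.

import torch

m = make_style()
x = torch.rand(4, 4, 4, 4)
style = m(x)              # shape (4, 4): one 4-channel style vector per sample
print(style.shape)
print(style.norm(dim=1))  # all approximately 1.0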
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp8 = tmp7 + tmp6
    tmp10 = tmp9 + tmp8
    tmp12 = tmp11 + tmp10
    tmp14 = tmp13 + tmp12
    tmp16 = tmp15 + tmp14
    tmp18 = tmp17 + tmp16
    tmp20 = tmp19 + tmp18
    tmp22 = tmp21 + tmp20
    tmp24 = tmp23 + tmp22
    tmp26 = tmp25 + tmp24
    tmp28 = tmp27 + tmp26
    tmp30 = tmp29 + tmp28
    tmp31 = 0.0625
    tmp32 = tmp30 * tmp31
    tl.store(out_ptr0 + x0, tmp32, xmask)


@triton.jit
def triton_poi_fused_div_pow_sum_1(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = tmp0 / tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_div_pow_sum_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf0
    return buf1,


class make_styleNew(nn.Module):

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
YinuoJin/cellpose
make_style
false
12,017
[ "BSD-3-Clause" ]
0
eb8df70f295ac8465633f468d487aee1dd13a181
https://github.com/YinuoJin/cellpose/tree/eb8df70f295ac8465633f468d487aee1dd13a181
PositionwiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/i3/ci3nuuurbsrmcufle642yc7udhwn4itsu6aptfssij5nzrnylpne.py # Topologically Sorted Source Nodes: [conv1d, output], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv1d => convolution # output => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/lf/clf7hs52i4bd5d3e73uio27ntyjfqmszkbsw6dta3r6rzgeftva3.py # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/in/ciniyjn7eyz6kfao5xoph2rbugonh4ujhobeqsni3egmy2cyb6jq.py # Topologically Sorted Source Nodes: [add, mu, sigma], Original ATen: [aten.add, aten.mean, aten.std] # Source node to ATen node mapping: # add => add # mu => mean # sigma => var # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add, [-1]), kwargs = {correction: 1.0, keepdim: True}) triton_poi_fused_add_mean_std_3 = async_compile.triton('triton_poi_fused_add_mean_std_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_std_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp8 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + (x2), tmp29, xmask) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3p/c3pxygonyvwt7htiobzn7yqzmectxzeqvh7ezkgsvmrrsjmztpuc.py # Topologically Sorted Source Nodes: [add, sub, add_1, ln_out, mul, ln_out_1], Original ATen: [aten.add, aten.sub, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # ln_out => div # ln_out_1 => add_2 # mul => mul # sub => sub # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %expand), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 0.001), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %expand_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_3), kwargs = {}) triton_poi_fused_add_div_mul_sub_4 = async_compile.triton('triton_poi_fused_add_div_mul_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (y0), ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = libdevice.sqrt(tmp5) tmp7 = 0.001 tmp8 = tmp6 + tmp7 tmp9 = tmp4 / tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + (4*y3)), tmp13, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv1d, output], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), 
dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf4, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf6 = buf5; del buf5 # reuse buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add, mu, sigma], Original ATen: [aten.add, aten.mean, aten.std] triton_poi_fused_add_mean_std_3.run(buf6, buf4, primals_1, buf7, 16, grid=grid(16), stream=stream0) buf8 = reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add, sub, add_1, ln_out, mul, ln_out_1], Original ATen: [aten.add, aten.sub, aten.div, aten.mul] triton_poi_fused_add_div_mul_sub_4.run(buf4, primals_1, buf7, buf6, primals_6, primals_7, buf8, 16, 4, grid=grid(16, 4), stream=stream0) del buf6 del buf7 del primals_7 return (buf8, primals_1, primals_2, primals_4, primals_6, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
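The fused mean/std kernel above sums four squared deviations and divides by 3.0, i.e. Bessel's correction for four samples, which matches torch.std's default correction=1. A quick illustrative check (not part of the original entry):

import torch

z = torch.rand(4, 4, 4)
# unbiased variance over the last dim, dividing by n - 1 = 3 exactly as the kernel does
var = z.sub(z.mean(dim=-1, keepdim=True)).pow(2).sum(dim=-1, keepdim=True) / 3.0
torch.testing.assert_close(var.sqrt(), z.std(dim=-1, keepdim=True))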
import torch import torch.nn as nn class LayerNormalization(nn.Module): """ Layer normalization module """ def __init__(self, d_hid, eps=0.001): super(LayerNormalization, self).__init__() self.eps = eps self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True) self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True) def forward(self, z): if z.size(1) == 1: return z mu = torch.mean(z, keepdim=True, dim=-1) sigma = torch.std(z, keepdim=True, dim=-1) ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps) ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as( ln_out) return ln_out class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_hid, d_inner_hid, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1) self.w_2 = nn.Conv1d(d_inner_hid, d_hid, 1) self.layer_norm = LayerNormalization(d_hid) self.dropout = nn.Dropout(dropout) self.relu = nn.ReLU() def forward(self, x): residual = x output = self.relu(self.w_1(x.transpose(1, 2))) output = self.w_2(output).transpose(2, 1) output = self.dropout(output) return self.layer_norm(output + residual) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_hid': 4, 'd_inner_hid': 4}]
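A minimal eager-mode usage sketch for the reference module above (illustrative only, not part of the dataset entry); it reuses the shapes reported by get_inputs()/get_init_inputs() and switches to eval() so dropout does not make repeated runs diverge:

import torch

ffn = PositionwiseFeedForward(d_hid=4, d_inner_hid=4)
ffn.eval()  # disable dropout for a deterministic forward pass
x = torch.rand(4, 4, 4)
with torch.no_grad():
    y = ffn(x)
assert y.shape == x.shape  # residual + layer norm keep the shape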
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + x2, tmp29, xmask) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 
xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr3 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = libdevice.sqrt(tmp5) tmp7 = 0.001 tmp8 = tmp6 + tmp7 tmp9 = tmp4 / tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_mean_std_3[grid(16)](buf6, buf4, primals_1, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0) del buf0 triton_poi_fused_add_div_mul_sub_4[grid(16, 4)](buf4, primals_1, buf7, buf6, primals_6, primals_7, buf8, 16, 4, XBLOCK=4, YBLOCK =16, num_warps=1, num_stages=1) del buf6 del buf7 del primals_7 return buf8, primals_1, primals_2, primals_4, primals_6, buf2, buf4 class LayerNormalization(nn.Module): """ Layer normalization module """ def __init__(self, d_hid, eps=0.001): super(LayerNormalization, self).__init__() self.eps = eps self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True) self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True) def forward(self, z): if z.size(1) == 1: return z mu = torch.mean(z, keepdim=True, dim=-1) sigma = torch.std(z, keepdim=True, dim=-1) ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps) ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as( ln_out) return ln_out class 
PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_hid, d_inner_hid, dropout=0.1): super(PositionwiseFeedForwardNew, self).__init__() self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1) self.w_2 = nn.Conv1d(d_inner_hid, d_hid, 1) self.layer_norm = LayerNormalization(d_hid) self.dropout = nn.Dropout(dropout) self.relu = nn.ReLU() def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.a_2 primals_7 = self.layer_norm.b_2 primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
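A hedged parity sketch between the eager and compiled wrappers (assumptions: a CUDA device, since call() hard-codes cuda:0, and eval mode, since the compiled graph contains no dropout op):

import torch

eager = PositionwiseFeedForward(4, 4).cuda().eval()
compiled = PositionwiseFeedForwardNew(4, 4).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # both modules use the same parameter names
x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-5)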
YunjieJi/attention-is-all-you-need-pytorch
PositionwiseFeedForward
false
12,018
[ "MIT" ]
0
636117b438d584ccba0ae5d6998fc02f3888f46e
https://github.com/YunjieJi/attention-is-all-you-need-pytorch/tree/636117b438d584ccba0ae5d6998fc02f3888f46e
NormLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/kr/ckrxqy227ju5oy4cidmyb35esvbq3qjjqbbpiqrdjo6j5hkwzhi4.py # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 4), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 4.00000001), kwargs = {}) triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.249999999375 tmp4 
= tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
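Note how Inductor folds (x - mean) / (std + eps) into a subtraction and a single multiply: 0.249999999375 is just the precomputed reciprocal of 4 + 1e-08. An illustrative check (not part of the original entry):

import torch

recip = 1.0 / (4 + 1e-08)  # ~0.249999999375, the multiplier baked into the kernel
x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close((x - 4.0) / (4 + 1e-08), (x - 4.0) * recip)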
import torch import torch.nn as nn class NormLayer(nn.Module): def __init__(self, mean, std, n=None, eps=1e-08) -> None: super().__init__() self.mean = mean self.std = std self.eps = eps def forward(self, x): return (x - self.mean) / (self.std + self.eps) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'mean': 4, 'std': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.249999999375 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormLayerNew(nn.Module): def __init__(self, mean, std, n=None, eps=1e-08) ->None: super().__init__() self.mean = mean self.std = std self.eps = eps def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
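A hedged parity sketch for the two versions above (assumption: a CUDA device is available, which call() requires):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = NormLayer(mean=4, std=4)(x)     # eager: (x - 4) / (4 + 1e-08)
out = NormLayerNew(mean=4, std=4)(x)  # compiled: (x - 4) * 0.249999999375
torch.testing.assert_close(out, ref)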
YNNEKUW/captum
NormLayer
false
12,019
[ "BSD-3-Clause" ]
0
c8b5357b21f2ddf440e5f0ce25635977292aa5d1
https://github.com/YNNEKUW/captum/tree/c8b5357b21f2ddf440e5f0ce25635977292aa5d1
WingLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/5y/c5yx2tmktzc5brhfqiwvzfmmrhkpiscn4gi6kct3s3fnisnd7xlo.py # Topologically Sorted Source Nodes: [sub, delta, lt, truediv, add, log, mul, sub_1, losses, sum_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.div, aten.add, aten.log, aten.mul, aten.where, aten.sum] # Source node to ATen node mapping: # add => add # delta => abs_1 # log => log # losses => where # lt => lt # mul => mul # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 10.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1.0), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, 10.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, -7.91759469228055), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %mul, %sub_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where, [1, 2]), kwargs = {}) triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0 = async_compile.triton('triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = (xindex // 4) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 10.0 tmp5 = tmp3 < tmp4 tmp6 = 0.5 tmp7 = tmp3 * tmp6 tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tl_math.log(tmp9) tmp11 = tmp10 * tmp4 tmp12 = -7.91759469228055 tmp13 = tmp3 - tmp12 tmp14 = tl.where(tmp5, tmp11, tmp13) tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tl.store(out_ptr0 + (x3), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/yi/cyivqxc32riwmi4nsqtijcqefjxyivwafk4qjnscryb6cwpfnqim.py # Topologically Sorted Source Nodes: [loss, mul_1], Original ATen: [aten.mean, aten.mul] # Source node to ATen node mapping: # loss => mean # mul_1 => mul_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sum_1, [0]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_poi_fused_mean_mul_1 = async_compile.triton('triton_poi_fused_mean_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, delta, lt, truediv, add, log, mul, sub_1, losses, sum_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.div, aten.add, aten.log, aten.mul, aten.where, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0.run(arg0_1, arg1_1, buf0, 16, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [loss, mul_1], Original ATen: [aten.mean, aten.mul] triton_poi_fused_mean_mul_1.run(buf0, buf1, 4, grid=grid(4), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn class WingLoss(nn.Module): """Wing Loss 'Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks' Feng et al. CVPR'2018. Args: omega (float), epsilon (float) are hyper-parameters. use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, omega=10.0, epsilon=2.0, use_target_weight=False, loss_weight=1.0): super().__init__() self.omega = omega self.epsilon = epsilon self.use_target_weight = use_target_weight self.loss_weight = loss_weight self.C = self.omega * (1.0 - math.log(1.0 + self.omega / self.epsilon)) def criterion(self, pred, target): """Criterion of wingloss. Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: pred (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. """ delta = (target - pred).abs() losses = torch.where(delta < self.omega, self.omega * torch.log(1.0 + delta / self.epsilon), delta - self.C) return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) def forward(self, output, target, target_weight=None): """Forward function. Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: output (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. target_weight (torch.Tensor[N, K, D]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
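Illustrative arithmetic (not part of the original entry): with the defaults omega=10.0 and epsilon=2.0, the constant C defined in __init__ is negative, which is why the fused kernel's linear branch computes delta - (-7.91759469228055), i.e. delta + |C|:

import math

omega, epsilon = 10.0, 2.0
C = omega * (1.0 - math.log(1.0 + omega / epsilon))
print(C)  # ~ -7.91759469228055, the tmp12 constant hard-coded in the kernel
# for delta >= omega the loss is delta - C, a line that meets the log branch
# continuously at delta == omega: omega - C == omega * log(1 + omega / epsilon)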
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 10.0 tmp5 = tmp3 < tmp4 tmp6 = 0.5 tmp7 = tmp3 * tmp6 tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tl_math.log(tmp9) tmp11 = tmp10 * tmp4 tmp12 = -7.91759469228055 tmp13 = tmp3 - tmp12 tmp14 = tl.where(tmp5, tmp11, tmp13) tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0[grid(16)]( arg0_1, arg1_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mean_mul_1[grid(4)](buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf0 return buf1, class WingLossNew(nn.Module): """Wing Loss 'Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks' Feng et al. CVPR'2018. Args: omega (float), epsilon (float) are hyper-parameters. use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, omega=10.0, epsilon=2.0, use_target_weight=False, loss_weight=1.0): super().__init__() self.omega = omega self.epsilon = epsilon self.use_target_weight = use_target_weight self.loss_weight = loss_weight self.C = self.omega * (1.0 - math.log(1.0 + self.omega / self.epsilon)) def criterion(self, pred, target): """Criterion of wingloss. Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: pred (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. 
""" delta = (target - pred).abs() losses = torch.where(delta < self.omega, self.omega * torch.log(1.0 + delta / self.epsilon), delta - self.C) return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ZephyrII/mmpose_charger
WingLoss
false
12,020
[ "Apache-2.0" ]
0
ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
https://github.com/ZephyrII/mmpose_charger/tree/ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
ExtResNetBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/nq/cnqfeo5iu3yeiof2x33r7xnbaj7fk2w7g5swc25ak43vd4xaupdu.py # Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.elu] # Source node to ATen node mapping: # input_2 => add, add_1, mul_1, rsqrt, var_mean # input_3 => expm1, gt, mul_2, mul_4, where # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_6), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_2,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul_2, %mul_4), kwargs = {}) triton_per_fused_elu_native_group_norm_0 = async_compile.triton('triton_per_fused_elu_native_group_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, 
cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_elu_native_group_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 0.0 tmp29 = tmp27 > tmp28 tmp30 = 1.0 tmp31 = tmp27 * tmp30 tmp32 = libdevice.expm1(tmp31) tmp33 = tmp32 * tmp30 tmp34 = tl.where(tmp29, tmp31, tmp33) tl.store(in_out_ptr0 + (r1 + (64*x0)), tmp34, xmask) tl.store(out_ptr2 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/u5/cu5cyt57jdnmvmp7ergqhphxhmiitnaocaheuhyvmg4limh7mwlo.py # Topologically Sorted Source Nodes: [input_8, out, out_1], Original ATen: [aten.native_group_norm, aten.add, aten.elu] # Source node to ATen node mapping: # input_8 => add_4, add_5, mul_11, rsqrt_2, var_mean_2 # out => add_6 # out_1 => expm1_2, gt_2, mul_12, mul_14, where_2 # Graph fragment: # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_22), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, %unsqueeze_19), kwargs = {}) # %add_6 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %where), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_6, 0), kwargs = {}) # %mul_12 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 1.0), kwargs = {}) # %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_12,), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_12, %mul_14), kwargs = {}) triton_per_fused_add_elu_native_group_norm_1 = async_compile.triton('triton_per_fused_add_elu_native_group_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_elu_native_group_norm_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 
tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = 0.0 tmp31 = tmp29 > tmp30 tmp32 = 1.0 tmp33 = tmp29 * tmp32 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp32 tmp36 = tl.where(tmp31, tmp33, tmp35) tl.store(in_out_ptr0 + (r1 + (64*x0)), tmp36, xmask) tl.store(out_ptr2 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf4; del buf4 # reuse buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) # Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.elu] stream0 = get_raw_stream(0) triton_per_fused_elu_native_group_norm_0.run(buf6, buf0, primals_3, primals_4, buf1, buf5, 4, 64, grid=grid(4), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf8 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf11; del buf11 # reuse buf12 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) # Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.native_group_norm, aten.elu] triton_per_fused_elu_native_group_norm_0.run(buf13, buf7, primals_6, primals_7, buf8, buf12, 4, 64, grid=grid(4), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf15 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf20 = buf19; del buf19 # reuse buf18 = 
empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) # Topologically Sorted Source Nodes: [input_8, out, out_1], Original ATen: [aten.native_group_norm, aten.add, aten.elu] triton_per_fused_add_elu_native_group_norm_1.run(buf20, buf14, primals_9, primals_10, buf6, buf15, buf18, 4, 64, grid=grid(4), stream=stream0) del primals_10 return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8, primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf5, (4, 1), (1, 1), 0), buf6, buf7, reinterpret_tensor(buf8, (4, 1), (1, 1), 0), reinterpret_tensor(buf12, (4, 1), (1, 1), 0), buf13, buf14, reinterpret_tensor(buf15, (4, 1), (1, 1), 0), reinterpret_tensor(buf18, (4, 1), (1, 1), 0), buf20, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
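# Eager-mode gloss of the fused kernels above (an illustrative sketch, not
# part of the generated module): each program of
# triton_per_fused_elu_native_group_norm_0 reduces one 64-element group
# (4 channels x 16 spatial positions per sample) to its mean and
# rsqrt(var + eps), normalizes, applies the per-channel affine, and finishes
# with ELU computed via expm1; the second kernel is identical except that it
# adds the residual before the ELU.
import torch
import torch.nn.functional as F

def groupnorm_elu_reference(x, weight, bias, residual=None, eps=1e-05):
    # x: (4, 4, 4, 4) in the layout the kernel sees; a single norm group
    y = F.group_norm(x, num_groups=1, weight=weight, bias=bias, eps=eps)
    if residual is not None:
        y = y + residual  # what triton_per_fused_add_elu_... folds in
    return F.elu(y, alpha=1.0)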
import torch from torch import nn def conv3d(in_channels, out_channels, kernel_size, bias, padding): return nn.Conv3d(in_channels, out_channels, kernel_size, padding= padding, bias=bias) def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding): """ Create a list of modules which together constitute a single conv layer with non-linearity and optional batchnorm/groupnorm. Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size (int or tuple): size of the convolving kernel order (string): order of operations, e.g. 'cr' -> conv + ReLU 'gcr' -> groupnorm + conv + ReLU 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU 'bcr' -> batchnorm + conv + ReLU num_groups (int): number of groups for the GroupNorm padding (int or tuple): zero-padding added to all three sides of the input Return: list of tuple (name, module) """ assert 'c' in order, 'Conv layer MUST be present' assert order[0 ] not in 'rle', 'Non-linearity cannot be the first operation in the layer' modules = [] for i, char in enumerate(order): if char == 'r': modules.append(('ReLU', nn.ReLU(inplace=True))) elif char == 'l': modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True))) elif char == 'e': modules.append(('ELU', nn.ELU(inplace=True))) elif char == 'c': bias = not ('g' in order or 'b' in order) modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding))) elif char == 'g': is_before_conv = i < order.index('c') if is_before_conv: num_channels = in_channels else: num_channels = out_channels if num_channels < num_groups: num_groups = 1 assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}' modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=num_channels))) elif char == 'b': is_before_conv = i < order.index('c') if is_before_conv: modules.append(('batchnorm', nn.BatchNorm3d(in_channels))) else: modules.append(('batchnorm', nn.BatchNorm3d(out_channels))) elif char == 'a': modules.append(('Rational', Rational())) else: raise ValueError( f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c', 'a']" ) return modules class Rational(torch.nn.Module): """Rational Activation function. Implementation provided by Mario Casado (https://github.com/Lezcano) It follows: `f(x) = P(x) / Q(x)`, where the coefficients of P and Q are initialized to the best rational approximation of degree (3,2) to the ReLU function # Reference - [Rational neural networks](https://arxiv.org/abs/2004.01902) """ def __init__(self): super().__init__() self.coeffs = torch.nn.Parameter(torch.Tensor(4, 2)) self.reset_parameters() def reset_parameters(self): self.coeffs.data = torch.Tensor([[1.1915, 0.0], [1.5957, 2.383], [ 0.5, 0.0], [0.0218, 1.0]]) def forward(self, input: 'torch.Tensor') -> torch.Tensor: self.coeffs.data[0, 1].zero_() exp = torch.tensor([3.0, 2.0, 1.0, 0.0], device=input.device, dtype =input.dtype) X = torch.pow(input.unsqueeze(-1), exp) PQ = X @ self.coeffs output = torch.div(PQ[..., 0], PQ[..., 1]) return output class SingleConv(nn.Sequential): """ Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm.
The order of operations can be specified via the `order` parameter. Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size (int or tuple): size of the convolving kernel order (string): determines the order of layers, e.g. 'cr' -> conv + ReLU 'crg' -> conv + ReLU + groupnorm 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU num_groups (int): number of groups for the GroupNorm padding (int or tuple): zero-padding added to all three sides of the input """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'gcr', num_groups=8, padding=1): super(SingleConv, self).__init__() for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding): self.add_module(name, module) class ExtResNetBlock(nn.Module): """ Basic UNet block consisting of a SingleConv followed by a residual block. The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number of output channels is compatible with the residual block that follows. This block can be used instead of the standard DoubleConv in the Encoder module. Motivated by: https://arxiv.org/pdf/1706.00120.pdf Notice we use ELU instead of ReLU (order='cge') and put the non-linearity after the groupnorm. """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'cge', num_groups=8, **kwargs): super(ExtResNetBlock, self).__init__() self.conv1 = SingleConv(in_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) self.conv2 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) n_order = order for c in 'rel': n_order = n_order.replace(c, '') self.conv3 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=n_order, num_groups=num_groups) if 'l' in order: self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True) elif 'e' in order: self.non_linearity = nn.ELU(inplace=True) else: self.non_linearity = nn.ReLU(inplace=True) def forward(self, x): out = self.conv1(x) residual = out out = self.conv2(out) out = self.conv3(out) out += residual out = self.non_linearity(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
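# Hypothetical usage of create_conv (an illustration, not from the source
# repo): the `order` string is a small mini-language. 'cge' emits Conv3d
# (bias disabled because a norm layer is present), then GroupNorm, then ELU;
# note that num_groups silently falls back to 1 whenever it exceeds the
# channel count.
blocks = create_conv(in_channels=4, out_channels=8, kernel_size=3,
                     order='cge', num_groups=8, padding=1)
for name, module in blocks:
    print(name, type(module).__name__)
# conv Conv3d
# groupnorm GroupNorm
# ELU ELU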
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 0.0 tmp29 = tmp27 > tmp28 tmp30 = 1.0 tmp31 = tmp27 * tmp30 tmp32 = libdevice.expm1(tmp31) tmp33 = tmp32 * tmp30 tmp34 = tl.where(tmp29, tmp31, tmp33) tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp34, xmask) tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = 0.0 tmp31 = tmp29 > tmp30 tmp32 = 1.0 tmp33 = tmp29 * tmp32 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp32 tmp36 = tl.where(tmp31, tmp33, tmp35) tl.store(in_out_ptr0 + 
(r1 + 64 * x0), tmp36, xmask) tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf4 del buf4 buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) get_raw_stream(0) triton_per_fused_elu_native_group_norm_0[grid(4)](buf6, buf0, primals_3, primals_4, buf1, buf5, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_4 buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf8 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf11 del buf11 buf12 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_elu_native_group_norm_0[grid(4)](buf13, buf7, primals_6, primals_7, buf8, buf12, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_7 buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf15 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf20 = buf19 del buf19 buf18 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_add_elu_native_group_norm_1[grid(4)](buf20, buf14, primals_9, primals_10, buf6, buf15, buf18, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_10 return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8, primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf5, (4, 1), (1, 1), 0), buf6, buf7, reinterpret_tensor(buf8, (4, 1), (1, 1), 0), reinterpret_tensor( buf12, (4, 1), (1, 1), 0), buf13, buf14, reinterpret_tensor(buf15, (4, 1), (1, 1), 0), reinterpret_tensor(buf18, (4, 1), (1, 1), 0), buf20 ) def conv3d(in_channels, out_channels, kernel_size, bias, padding): return nn.Conv3d(in_channels, out_channels, kernel_size, padding= padding, bias=bias) def 
create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding): """ Create a list of modules which together constitute a single conv layer with non-linearity and optional batchnorm/groupnorm. Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size (int or tuple): size of the convolving kernel order (string): order of operations, e.g. 'cr' -> conv + ReLU 'gcr' -> groupnorm + conv + ReLU 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU 'bcr' -> batchnorm + conv + ReLU num_groups (int): number of groups for the GroupNorm padding (int or tuple): zero-padding added to all three sides of the input Return: list of tuple (name, module) """ assert 'c' in order, 'Conv layer MUST be present' assert order[0 ] not in 'rle', 'Non-linearity cannot be the first operation in the layer' modules = [] for i, char in enumerate(order): if char == 'r': modules.append(('ReLU', nn.ReLU(inplace=True))) elif char == 'l': modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True))) elif char == 'e': modules.append(('ELU', nn.ELU(inplace=True))) elif char == 'c': bias = not ('g' in order or 'b' in order) modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding))) elif char == 'g': is_before_conv = i < order.index('c') if is_before_conv: num_channels = in_channels else: num_channels = out_channels if num_channels < num_groups: num_groups = 1 assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}' modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=num_channels))) elif char == 'b': is_before_conv = i < order.index('c') if is_before_conv: modules.append(('batchnorm', nn.BatchNorm3d(in_channels))) else: modules.append(('batchnorm', nn.BatchNorm3d(out_channels))) elif char == 'a': modules.append(('Rational', Rational())) else: raise ValueError( f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c', 'a']" ) return modules class Rational(torch.nn.Module): """Rational Activation function. Implementation provided by Mario Casado (https://github.com/Lezcano) It follows: `f(x) = P(x) / Q(x)`, where the coefficients of P and Q are initialized to the best rational approximation of degree (3,2) to the ReLU function # Reference - [Rational neural networks](https://arxiv.org/abs/2004.01902) """ def __init__(self): super().__init__() self.coeffs = torch.nn.Parameter(torch.Tensor(4, 2)) self.reset_parameters() def reset_parameters(self): self.coeffs.data = torch.Tensor([[1.1915, 0.0], [1.5957, 2.383], [ 0.5, 0.0], [0.0218, 1.0]]) def forward(self, input: 'torch.Tensor') -> torch.Tensor: self.coeffs.data[0, 1].zero_() exp = torch.tensor([3.0, 2.0, 1.0, 0.0], device=input.device, dtype =input.dtype) X = torch.pow(input.unsqueeze(-1), exp) PQ = X @ self.coeffs output = torch.div(PQ[..., 0], PQ[..., 1]) return output class SingleConv(nn.Sequential): """ Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order of operations can be specified via the `order` parameter. Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size (int or tuple): size of the convolving kernel order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU 'crg' -> conv + ReLU + groupnorm 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU num_groups (int): number of groups for the GroupNorm padding (int or tuple): zero-padding added to all three sides of the input """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'gcr', num_groups=8, padding=1): super(SingleConv, self).__init__() for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding): self.add_module(name, module) class ExtResNetBlockNew(nn.Module): """ Basic UNet block consisting of a SingleConv followed by a residual block. The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number of output channels is compatible with the residual block that follows. This block can be used instead of the standard DoubleConv in the Encoder module. Motivated by: https://arxiv.org/pdf/1706.00120.pdf Notice we use ELU instead of ReLU (order='cge') and put the non-linearity after the groupnorm. """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'cge', num_groups=8, **kwargs): super(ExtResNetBlockNew, self).__init__() self.conv1 = SingleConv(in_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) self.conv2 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) n_order = order for c in 'rel': n_order = n_order.replace(c, '') self.conv3 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=n_order, num_groups=num_groups) if 'l' in order: self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True) elif 'e' in order: self.non_linearity = nn.ELU(inplace=True) else: self.non_linearity = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv1.conv.weight primals_3 = self.conv1.groupnorm.weight primals_4 = self.conv1.groupnorm.bias primals_5 = self.conv2.conv.weight primals_6 = self.conv2.groupnorm.weight primals_7 = self.conv2.groupnorm.bias primals_8 = self.conv3.conv.weight primals_9 = self.conv3.groupnorm.weight primals_10 = self.conv3.groupnorm.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
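# Illustrative equivalence check (assumed intent, not shipped code; requires
# both the eager ExtResNetBlock and the generated ExtResNetBlockNew in
# scope): the compiled class keeps the same submodules and parameters, and
# its forward merely flattens them into call(), so with shared weights the
# two should agree numerically.
import torch
eager = ExtResNetBlock(in_channels=4, out_channels=4).cuda()
compiled = ExtResNetBlockNew(in_channels=4, out_channels=4).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)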
YinanZYN/pytorch-3dunet
ExtResNetBlock
false
12,021
[ "MIT" ]
0
d1494f421a836af54c3dde65c54e3e62d5c00800
https://github.com/YinanZYN/pytorch-3dunet/tree/d1494f421a836af54c3dde65c54e3e62d5c00800
Conv2dBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/z3/cz3vliqlpgih6ihwoaxl6cmnicfmv2ygutcuphilcsragp3evc57.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_3, buf2, 16, grid=grid(16), stream=stream0) del primals_3 return (buf1, primals_1, primals_2, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
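# Eager-mode sketch of the fused epilogue above (illustrative): the Triton
# kernel adds the convolution bias, applies ReLU, and records the mask of
# non-positive activations that ReLU's backward (threshold_backward) reads.
import torch

def bias_relu_with_mask(conv_out, bias):
    # conv_out: (N, C, H, W) output of extern_kernels.convolution
    y = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    mask = y <= 0  # stored as buf2 for the backward pass
    return y, mask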
import torch from torch import nn import torch.nn.functional as F def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): """ Based on the paper "Spectral Normalization for Generative Adversarial Networks" by Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida and the Pytorch implementation https://github.com/christiancosgrove/pytorch-spectral-normalization-gan """ def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = nn.Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum self.weight = None self.bias = None self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!' b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm(x_reshaped, running_mean, running_var, self. 
weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class Conv2dBlock(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias = True if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm == 'sn': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, x): x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4, 'stride': 1}]
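# Hypothetical usage (illustrative): Conv2dBlock chains pad -> conv ->
# optional norm -> optional activation; the compiled trace above corresponds
# to the default norm='none', activation='relu', pad_type='zero' path.
import torch
block = Conv2dBlock(input_dim=4, output_dim=4, kernel_size=4, stride=1)
x = torch.rand(4, 4, 4, 4)
y = block(x)  # shape (4, 4, 1, 1): a 4x4 kernel slides once over a 4x4 map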
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1, primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf1, primals_1, primals_2, buf2 def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): """ Based on the paper "Spectral Normalization for Generative Adversarial Networks" by Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida and the Pytorch implementation https://github.com/christiancosgrove/pytorch-spectral-normalization-gan """ def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) 
setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = nn.Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum self.weight = None self.bias = None self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!' b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm(x_reshaped, running_mean, running_var, self. weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class Conv2dBlockNew(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlockNew, self).__init__() self.use_bias = True if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm == 'sn': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
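# Caveat inferred from the generated forward (not stated in the source):
# call() always replays the traced conv + fused-ReLU path, so constructing
# Conv2dBlockNew with a different norm or activation changes the stored
# submodules but not what forward computes.
import torch
blk = Conv2dBlockNew(4, 4, 4, 1, activation='tanh').cuda()  # hypothetical config
y = blk(torch.rand(4, 4, 4, 4, device='cuda'))  # still ReLU, not tanh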
YueZHOU0926/MUNIT_3D
Conv2dBlock
false
12,022
[ "MIT" ]
0
5cb22b5f3cb127d5b2c4eea038254a7881bab372
https://github.com/YueZHOU0926/MUNIT_3D/tree/5cb22b5f3cb127d5b2c4eea038254a7881bab372
BCELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/a6/ca625ch2ma4nxamrbh6aiqjarhys2ayutwxi7mqpfwzyf5cgoavx.py # Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.binary_cross_entropy, aten.mul] # Source node to ATen node mapping: # loss => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1 # mul => mul_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %maximum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_binary_cross_entropy_mul_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = tmp17 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.binary_cross_entropy, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
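# Reference formula for the fused reduction above (illustrative): PyTorch's
# binary_cross_entropy clamps both logarithms at -100 for numerical safety,
#   loss = mean(-(t * clamp(log(p), min=-100)
#                 + (1 - t) * clamp(log1p(-p), min=-100)))
# and the kernel folds the clamps, the mean over all 256 elements, and the
# final multiplication by loss_weight (1.0 here) into a single pass.
import torch

def bce_reference(p, t):
    log_p = torch.clamp(torch.log(p), min=-100.0)
    log_1mp = torch.clamp(torch.log1p(-p), min=-100.0)
    return -(t * log_p + (1.0 - t) * log_1mp).mean()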
import torch import torch.nn as nn import torch.nn.functional as F class BCELoss(nn.Module): """Binary Cross Entropy loss.""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.binary_cross_entropy self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: batch_size: N num_labels: K Args: output (torch.Tensor[N, K]): Output classification. target (torch.Tensor[N, K]): Target classification. target_weight (torch.Tensor[N, K] or torch.Tensor[N]): Weights across different labels. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output, target, reduction='none') if target_weight.dim() == 1: target_weight = target_weight[:, None] loss = (loss * target_weight).mean() else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
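# Hypothetical usage (illustrative): with use_target_weight=True the
# unreduced per-element loss is rescaled by a per-sample weight before the
# mean, so zero-weighted samples drop out of the sum (the mean still
# divides by the full element count).
import torch
loss_fn = BCELoss(use_target_weight=True, loss_weight=0.5)
output = torch.rand(2, 3)                 # predicted probabilities in [0, 1]
target = torch.randint(0, 2, (2, 3)).float()
sample_weight = torch.tensor([1.0, 0.0])  # mask out the second sample
loss = loss_fn(output, target, sample_weight)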
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = tmp17 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_mul_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BCELossNew(nn.Module): """Binary Cross Entropy loss.""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.binary_cross_entropy self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ZephyrII/mmpose_charger
BCELoss
false
12,023
[ "Apache-2.0" ]
0
ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
https://github.com/ZephyrII/mmpose_charger/tree/ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
CombinedTargetMSELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3f/c3fha6hwigp5qdkirvgzpdtvtztnza4ys4zevam5i2owamrhkdzx.py # Topologically Sorted Source Nodes: [heatmap_pred_1, heatmap_gt_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, truediv, mul_9], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div] # Source node to ATen node mapping: # heatmap_gt_1 => mul_1 # heatmap_pred_1 => mul # loss => add # loss_1 => add_1 # loss_2 => add_2 # mse_loss => mean, pow_1, sub # mse_loss_1 => mean_1, pow_2, sub_1 # mse_loss_2 => mean_2, pow_3, sub_2 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # mul_8 => mul_8 # mul_9 => mul_9 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %select), kwargs = {}) # %mul_1 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %select_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 0.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_3), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.5), kwargs = {}) # %add_1 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_4), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_5), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_6, %mul_7), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, 0.5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_8), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1.0), kwargs = {}) triton_per_fused_add_div_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp21 = 
tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp4 * tmp10 tmp13 = tmp4 * tmp12 tmp14 = tmp11 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp20 = tmp4 * tmp19 tmp22 = tmp4 * tmp21 tmp23 = tmp20 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 4.0 tmp29 = tmp9 / tmp28 tmp30 = 0.5 tmp31 = tmp29 * tmp30 tmp32 = 0.0 tmp33 = tmp31 + tmp32 tmp34 = tmp18 / tmp28 tmp35 = tmp34 * tmp30 tmp36 = tmp33 + tmp35 tmp37 = tmp27 / tmp28 tmp38 = tmp37 * tmp30 tmp39 = tmp36 + tmp38 tmp40 = 1.0 tmp41 = tmp39 * tmp40 tmp42 = tmp41 * tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp42, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [heatmap_pred_1, heatmap_gt_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, truediv, mul_9], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_mse_loss_mul_0.run(buf3, arg0_1, arg2_1, arg1_1, 1, 4, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class CombinedTargetMSELoss(nn.Module): """MSE loss for combined target. CombinedTarget: The combination of classification target (response map) and regression target (offset map). Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020). Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight, loss_weight=1.0): super().__init__() self.criterion = nn.MSELoss(reduction='mean') self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight): batch_size = output.size(0) num_channels = output.size(1) heatmaps_pred = output.reshape((batch_size, num_channels, -1)).split( 1, 1) heatmaps_gt = target.reshape((batch_size, num_channels, -1)).split(1, 1 ) loss = 0.0 num_joints = num_channels // 3 for idx in range(num_joints): heatmap_pred = heatmaps_pred[idx * 3].squeeze() heatmap_gt = heatmaps_gt[idx * 3].squeeze() offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze() offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze() offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze() offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze() if self.use_target_weight: heatmap_pred = heatmap_pred * target_weight[:, idx] heatmap_gt = heatmap_gt * target_weight[:, idx] loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt) loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred, heatmap_gt * offset_x_gt) loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred, heatmap_gt * offset_y_gt) return loss / num_joints * self.loss_weight def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'use_target_weight': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp4 * tmp10 tmp13 = tmp4 * tmp12 tmp14 = tmp11 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp20 = tmp4 * tmp19 tmp22 = tmp4 * tmp21 tmp23 = tmp20 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 4.0 tmp29 = tmp9 / tmp28 tmp30 = 0.5 tmp31 = tmp29 * tmp30 tmp32 = 0.0 tmp33 = tmp31 + tmp32 tmp34 = tmp18 / tmp28 tmp35 = tmp34 * tmp30 tmp36 = tmp33 + tmp35 tmp37 = tmp27 / tmp28 tmp38 = tmp37 * tmp30 tmp39 = tmp36 + tmp38 tmp40 = 1.0 tmp41 = tmp39 * tmp40 tmp42 = tmp41 * tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp42, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf3, arg0_1, arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf3, class CombinedTargetMSELossNew(nn.Module): """MSE loss for combined target. CombinedTarget: The combination of classification target (response map) and regression target (offset map). Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020). Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight, loss_weight=1.0): super().__init__() self.criterion = nn.MSELoss(reduction='mean') self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ZephyrII/mmpose_charger
CombinedTargetMSELoss
false
12024
[ "Apache-2.0" ]
0
ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
https://github.com/ZephyrII/mmpose_charger/tree/ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
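Usage sketch (editor addition, not part of the dataset record): a minimal equivalence check between the eager and compiled loss modules above, assuming a CUDA device since call() pins its buffers to cuda:0; `True` stands in for the truthy `use_target_weight=4` from get_init_inputs().

import torch

if torch.cuda.is_available():
    pred, gt, weight = (torch.rand(4, 4, device='cuda') for _ in range(3))
    eager = CombinedTargetMSELoss(use_target_weight=True)
    compiled = CombinedTargetMSELossNew(use_target_weight=True)
    # Both paths compute 0.5 * MSE over the (heatmap, offset_x, offset_y)
    # triplet of the single joint (num_channels // 3 == 1).
    assert torch.allclose(eager(pred, gt, weight),
        compiled(pred, gt, weight), atol=1e-06)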
FCDiscriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/fl/cflskeukqjcpn5pfynzkpwyovblowpewl3lyqiwirncoqacxcylo.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/fi/cfik3ekgqfui53hs3oovko4x7tlh4b2wbgnht32gjrarwmhugyng.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_2 => convolution_1 # x_3 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = 
tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ed/cedb4xkhtn35e7lnqetrdwsyqvpftp56fehphd4yymgaavex4aka.py # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_4 => convolution_2 # x_5 => gt_2, mul_2, where_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7q/c7qkey6xgen6g4in6j4xxwlisrmnbat37t62h6ukhjyafaazep4c.py # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_6 => convolution_3 # x_7 => gt_3, mul_3, where_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) 
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ik/cik5pyqifucbhblhkx6wygggufrollappejam67g5gl4ncmmv2wh.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_8 => convolution_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_10, %primals_11, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, 
major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (64, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_9, (512, ), (1, )) assert_size_stride(primals_10, (1, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_11, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 16, 16), (32768, 256, 16, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, 131072, grid=grid(131072), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, 
(4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf5, primals_7, 65536, grid=grid(65536), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 512, 4, 4), (8192, 16, 4, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_3.run(buf7, primals_9, 32768, grid=grid(32768), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 1, 2, 2), (4, 4, 2, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf9, primals_11, 16, grid=grid(16), stream=stream0) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((512, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class FCDiscriminator(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminator, self).__init__() self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1) self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1) self.classifier = nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): x = self.conv1(x) x = self.leaky_relu(x) x = self.conv2(x) x = self.leaky_relu(x) x = self.conv3(x) x = self.leaky_relu(x) x = self.conv4(x) x = self.leaky_relu(x) x = self.classifier(x) return x def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (64, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (1, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_11, (1,), (1,)) 
with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 16, 16), (32768, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_1[grid(131072)](buf3, primals_5, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(65536)](buf5, primals_7, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 512, 4, 4), (8192, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_leaky_relu_3[grid(32768)](buf7, primals_9, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 1, 2, 2), (4, 4, 2, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_4[grid(16)](buf9, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7) class FCDiscriminatorNew(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminatorNew, self).__init__() self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1) self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1) self.classifier = nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.classifier.weight primals_11 = self.classifier.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
YoNyeoSeok/AsymTri
FCDiscriminator
false
12025
[ "MIT" ]
0
a5a9a4b92074d770ed57802ff26b149a301cf4a4
https://github.com/YoNyeoSeok/AsymTri/tree/a5a9a4b92074d770ed57802ff26b149a301cf4a4
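Usage sketch (editor addition): running the compiled discriminator on a random 4-class score map, assuming a CUDA device; shapes follow get_inputs() and get_init_inputs() above.

import torch

if torch.cuda.is_available():
    disc = FCDiscriminatorNew(num_classes=4).cuda()
    out = disc(torch.rand(4, 4, 64, 64, device='cuda'))
    # Five stride-2 convolutions reduce the 64x64 input to 2x2 patch logits.
    assert out.shape == (4, 1, 2, 2)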
VertexDirectEmbedder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/xq/cxqinuparlha25j4geyv6tolvpah7qdqdkpecjesyn3kblysszql.py # Topologically Sorted Source Nodes: [norm, clamp, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div] # Source node to ATen node mapping: # clamp => clamp_min # norm => pow_1, pow_2, sum_1 # truediv => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %clamp_min), kwargs = {}) triton_poi_fused_clamp_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, clamp, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import pickle
import torch
import torch.utils.data
from torch import nn
# Assumed import: upstream detectron2/DensePose takes PathManager from
# detectron2's file I/O utilities; adjust if this fork exposes it elsewhere.
from detectron2.utils.file_io import PathManager


def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
    ) ->torch.Tensor:
    """
    Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]

    Args:
        embeddings (tensor [N, D]): N D-dimensional embedding vectors
        epsilon (float): minimum value for a vector norm
    Return:
        Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
    """
    return embeddings / torch.clamp(embeddings.norm(p=None, dim=1,
        keepdim=True), min=epsilon)


class VertexDirectEmbedder(nn.Module):
    """
    Class responsible for embedding vertices. Vertex embeddings take
    the form of a tensor of size [N, D], where
        N = number of vertices
        D = number of dimensions in the embedding space
    """

    def __init__(self, num_vertices: 'int', embed_dim: 'int'):
        """
        Initialize embedder, set random embeddings

        Args:
            num_vertices (int): number of vertices to embed
            embed_dim (int): number of dimensions in the embedding space
        """
        super(VertexDirectEmbedder, self).__init__()
        self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
        self.reset_parameters()

    @torch.no_grad()
    def reset_parameters(self):
        """
        Reset embeddings to random values
        """
        torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)

    def forward(self) ->torch.Tensor:
        """
        Produce vertex embeddings, a tensor of shape [N, D] where:
            N = number of vertices
            D = number of dimensions in the embedding space

        Return:
           Full vertex embeddings, a tensor of shape [N, D]
        """
        return normalize_embeddings(self.embeddings)

    @torch.no_grad()
    def load(self, fpath: 'str'):
        """
        Load data from a file

        Args:
            fpath (str): file path to load data from
        """
        with PathManager.open(fpath, 'rb') as hFile:
            data = pickle.load(hFile)
            for name in ['embeddings']:
                if name in data:
                    getattr(self, name).copy_(torch.tensor(data[name]).
                        float())


def get_inputs():
    return []


def get_init_inputs():
    return [[], {'num_vertices': 4, 'embed_dim': 4}]
import pickle
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
# Assumed import: PathManager is taken from detectron2's file I/O utilities,
# matching the eager module above; adjust if this fork exposes it elsewhere.
from detectron2.utils.file_io import PathManager
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


def call(args):
    primals_1, = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(16)](primals_1,
            buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
    return buf0, primals_1


def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
    ) ->torch.Tensor:
    """
    Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]

    Args:
        embeddings (tensor [N, D]): N D-dimensional embedding vectors
        epsilon (float): minimum value for a vector norm
    Return:
        Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
    """
    return embeddings / torch.clamp(embeddings.norm(p=None, dim=1,
        keepdim=True), min=epsilon)


class VertexDirectEmbedderNew(nn.Module):
    """
    Class responsible for embedding vertices. Vertex embeddings take
    the form of a tensor of size [N, D], where
        N = number of vertices
        D = number of dimensions in the embedding space
    """

    def __init__(self, num_vertices: 'int', embed_dim: 'int'):
        """
        Initialize embedder, set random embeddings

        Args:
            num_vertices (int): number of vertices to embed
            embed_dim (int): number of dimensions in the embedding space
        """
        super(VertexDirectEmbedderNew, self).__init__()
        self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
        self.reset_parameters()

    @torch.no_grad()
    def reset_parameters(self):
        """
        Reset embeddings to random values
        """
        torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)

    @torch.no_grad()
    def load(self, fpath: 'str'):
        """
        Load data from a file

        Args:
            fpath (str): file path to load data from
        """
        with PathManager.open(fpath, 'rb') as hFile:
            data = pickle.load(hFile)
            for name in ['embeddings']:
                if name in data:
                    getattr(self, name).copy_(torch.tensor(data[name]).
                        float())

    def forward(self):
        primals_1 = self.embeddings
        output = call([primals_1])
        return output[0]
YutouTaro/detectron2
VertexDirectEmbedder
false
12026
[ "Apache-2.0" ]
0
29f90062fa2978a35f1d599bb30768a2370378ca
https://github.com/YutouTaro/detectron2/tree/29f90062fa2978a35f1d599bb30768a2370378ca
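Usage sketch (editor addition): the compiled embedder takes no inputs and should return unit-norm rows, matching normalize_embeddings() up to the 1e-06 clamp; assumes a CUDA device.

import torch

if torch.cuda.is_available():
    emb = VertexDirectEmbedderNew(num_vertices=4, embed_dim=4).cuda()
    out = emb()
    # Each row is divided by its (clamped) L2 norm inside the fused kernel.
    assert torch.allclose(out.norm(dim=1), torch.ones(4, device='cuda'),
        atol=1e-05)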
Affine
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/we/cwegr75gc7slhvygkh4qgpti3y7cw7j23tllhdeulaje2nyjxbbr.py # Topologically Sorted Source Nodes: [addcmul], Original ATen: [aten.addcmul] # Source node to ATen node mapping: # addcmul => add, mul, mul_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul_1), kwargs = {}) triton_poi_fused_addcmul_0 = async_compile.triton('triton_poi_fused_addcmul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_addcmul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_addcmul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x2), xmask) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [addcmul], Original ATen: [aten.addcmul] stream0 = get_raw_stream(0) triton_poi_fused_addcmul_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data class Affine(nn.Module): def __init__(self, dim): super().__init__() self.alpha = nn.Parameter(torch.ones((1, 1, dim))) self.beta = nn.Parameter(torch.zeros((1, 1, dim))) def forward(self, x): return torch.addcmul(self.beta, self.alpha, x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_addcmul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x2, xmask) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_addcmul_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, primals_3 class AffineNew(nn.Module): def __init__(self, dim): super().__init__() self.alpha = nn.Parameter(torch.ones((1, 1, dim))) self.beta = nn.Parameter(torch.zeros((1, 1, dim))) def forward(self, input_0): primals_1 = self.alpha primals_2 = self.beta primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Yuki-Tanaka-33937424/pytorch-image-models
Affine
false
12027
[ "Apache-2.0" ]
0
6c1da622dcb2a0421aeb6cdcadd03cc366331f66
https://github.com/Yuki-Tanaka-33937424/pytorch-image-models/tree/6c1da622dcb2a0421aeb6cdcadd03cc366331f66
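Usage sketch (editor addition): the fused kernel implements torch.addcmul(beta, alpha, x), i.e. a per-channel affine beta + alpha * x; assumes a CUDA device.

import torch

if torch.cuda.is_available():
    aff = AffineNew(dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # alpha/beta have shape (1, 1, 4) and broadcast over the last dimension.
    assert torch.allclose(aff(x), aff.beta + aff.alpha * x, atol=1e-06)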
ResizeTransform
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/2h/c2hzrtqhbvxaedsmk5yf4w3blae4viyram4eduvj75lltgf3jdhn.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
#   x => _unsafe_index, _unsafe_index_1, add_1, clamp_max_1, clamp_min, clamp_min_1, convert_element_type, convert_element_type_1, iota, mul, mul_1, sub, sub_1
#   x_1 => mul_2
# Graph fragment:
#   %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (1,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 0), kwargs = {})
#   %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {})
#   %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {})
#   %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max]), kwargs = {})
#   %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1]), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_1), kwargs = {})
#   %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
#   %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_1), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.25), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 - tmp0
    tmp3 = 0.0
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tmp6 = 0.25
    tmp7 = tmp5 * tmp6
    tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as nnf


class ResizeTransform(nn.Module):
    """
    Resize a transform, which involves resizing the vector field *and* rescaling it.
    """

    def __init__(self, vel_resize, ndims):
        super().__init__()
        self.factor = 1.0 / vel_resize
        self.mode = 'linear'
        if ndims == 2:
            self.mode = 'bi' + self.mode
        elif ndims == 3:
            self.mode = 'tri' + self.mode

    def forward(self, x):
        if self.factor < 1:
            x = nnf.interpolate(x, align_corners=True, scale_factor=self.factor, mode=self.mode)
            x = self.factor * x
        elif self.factor > 1:
            x = self.factor * x
            x = nnf.interpolate(x, align_corners=True, scale_factor=self.factor, mode=self.mode)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'vel_resize': 4, 'ndims': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 - tmp0
    tmp3 = 0.0
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tmp6 = 0.25
    tmp7 = tmp5 * tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class ResizeTransformNew(nn.Module):
    """
    Resize a transform, which involves resizing the vector field *and* rescaling it.
    """

    def __init__(self, vel_resize, ndims):
        super().__init__()
        self.factor = 1.0 / vel_resize
        self.mode = 'linear'
        if ndims == 2:
            self.mode = 'bi' + self.mode
        elif ndims == 3:
            self.mode = 'tri' + self.mode

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
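A record like this one can be spot-checked by running the eager reference against the Inductor-generated wrapper on the record's own sample inputs. The harness below is a minimal sketch, not part of the record: it assumes a CUDA device and reuses get_inputs/get_init_inputs and the two classes defined in this record.

# Hypothetical equivalence check for this record (assumes CUDA is available).
init_args, init_kwargs = get_init_inputs()
eager = ResizeTransform(*init_args, **init_kwargs)
fused = ResizeTransformNew(*init_args, **init_kwargs)
x = get_inputs()[0].cuda()
with torch.no_grad():
    # Both paths downsample the field to length 1 and rescale by factor = 1/vel_resize.
    assert torch.allclose(eager(x), fused(x), atol=1e-6)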
Zer0-00/voxelmorph
ResizeTransform
false
12,028
[ "Apache-2.0" ]
0
ed2e0384cf22d19f7e57bea5887fc197d55f60bc
https://github.com/Zer0-00/voxelmorph/tree/ed2e0384cf22d19f7e57bea5887fc197d55f60bc
MPJPELoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/zu/czujx3x7ghsdpfovcj7w56ztapmlm4ti2434djcb5yn2khft5ydg.py
# Topologically Sorted Source Nodes: [sub, norm, loss, mul], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.mean, aten.mul]
# Source node to ATen node mapping:
#   loss => mean
#   mul => mul
#   norm => pow_1, pow_2, sum_1
#   sub => sub
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1]), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_linalg_vector_norm_mean_mul_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 64],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 64
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = libdevice.sqrt(tmp18)
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
    tmp22 = tl.sum(tmp20, 1)[:, None]
    tmp23 = 64.0
    tmp24 = tmp22 / tmp23
    tmp25 = 1.0
    tmp26 = tmp24 * tmp25
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [sub, norm, loss, mul], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.mean, aten.mul]
        stream0 = get_raw_stream(0)
        triton_per_fused_linalg_vector_norm_mean_mul_sub_0.run(buf1, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MPJPELoss(nn.Module):
    """MPJPE (Mean Per Joint Position Error) loss.

    Args:
        use_target_weight (bool): Option to use weighted MSE loss.
            Different joint types may have different target weights.
        loss_weight (float): Weight of the loss. Default: 1.0.
    """

    def __init__(self, use_target_weight=False, loss_weight=1.0):
        super().__init__()
        self.use_target_weight = use_target_weight
        self.loss_weight = loss_weight

    def forward(self, output, target, target_weight=None):
        """Forward function.

        Note:
            batch_size: N
            num_keypoints: K
            dimension of keypoints: D (D=2 or D=3)

        Args:
            output (torch.Tensor[N, K, D]): Output regression.
            target (torch.Tensor[N, K, D]): Target regression.
            target_weight (torch.Tensor[N, K, D]):
                Weights across different joint types.
        """
        if self.use_target_weight:
            assert target_weight is not None
            loss = torch.mean(torch.norm((output - target) * target_weight, dim=-1))
        else:
            loss = torch.mean(torch.norm(output - target, dim=-1))
        return loss * self.loss_weight


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_linalg_vector_norm_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = libdevice.sqrt(tmp18)
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
    tmp22 = tl.sum(tmp20, 1)[:, None]
    tmp23 = 64.0
    tmp24 = tmp22 / tmp23
    tmp25 = 1.0
    tmp26 = tmp24 * tmp25
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_linalg_vector_norm_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class MPJPELossNew(nn.Module):
    """MPJPE (Mean Per Joint Position Error) loss.

    Args:
        use_target_weight (bool): Option to use weighted MSE loss.
            Different joint types may have different target weights.
        loss_weight (float): Weight of the loss. Default: 1.0.
    """

    def __init__(self, use_target_weight=False, loss_weight=1.0):
        super().__init__()
        self.use_target_weight = use_target_weight
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
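The loss records can be validated the same way: both modules take the record's two sample tensors and return a scalar. A sketch under the same assumptions (CUDA device, the definitions from this record):

# Hypothetical check: eager MPJPE vs. the fused norm/mean/mul kernel.
output, target = (t.cuda() for t in get_inputs())
ref = MPJPELoss()(output, target)
fused = MPJPELossNew()(output, target)
assert ref.shape == fused.shape == torch.Size([])
assert torch.allclose(ref, fused, atol=1e-6)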
ZephyrII/mmpose_charger
MPJPELoss
false
12,029
[ "Apache-2.0" ]
0
ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
https://github.com/ZephyrII/mmpose_charger/tree/ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
FocalLossBinary
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/ri/crimnfoch5j6tka32nd746ykaaa7p742j3zpyuv2dmcrylff2dyl.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, logpt, pt, sub, pow_1, neg_1, loss, mul_1, sub_1, mul_2, add, loss_1, loss_2], Original ATen: [aten.binary_cross_entropy_with_logits, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mul, aten.add, aten.mean]
# Source node to ATen node mapping:
#   add => add
#   binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
#   logpt => neg_1
#   loss => mul_1
#   loss_1 => mul_4
#   loss_2 => mean
#   mul_1 => mul_2
#   mul_2 => mul_3
#   neg_1 => neg_2
#   pow_1 => pow_1
#   pt => exp_1
#   sub => sub_3
#   sub_1 => sub_4
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %view_1), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %view_1), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%view_1,), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
#   %neg_1 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%sub_2,), kwargs = {})
#   %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2.0), kwargs = {})
#   %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_1,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_2, %neg_1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 0.25), kwargs = {})
#   %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, 0.75), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %add), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {})
triton_per_fused_add_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp3 = tl.load(in_ptr1 + (r0), None)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    tmp13 = -tmp12
    tmp14 = tl_math.exp(tmp13)
    tmp15 = tmp1 - tmp14
    tmp16 = tmp15 * tmp15
    tmp17 = -tmp16
    tmp18 = tmp17 * tmp13
    tmp19 = 0.25
    tmp20 = tmp0 * tmp19
    tmp21 = 0.75
    tmp22 = tmp2 * tmp21
    tmp23 = tmp20 + tmp22
    tmp24 = tmp18 * tmp23
    tmp25 = tl.broadcast_to(tmp24, [RBLOCK])
    tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0))
    tmp28 = 256.0
    tmp29 = tmp27 / tmp28
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp29, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, logpt, pt, sub, pow_1, neg_1, loss, mul_1, sub_1, mul_2, add, loss_1, loss_2], Original ATen: [aten.binary_cross_entropy_with_logits, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mul, aten.add, aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.nn.functional as F
import torch.nn.functional
from functools import partial
from torch.nn.modules.loss import _Loss


def reduced_focal_loss(outputs: 'torch.Tensor', targets: 'torch.Tensor',
        threshold: 'float'=0.5, gamma: 'float'=2.0, reduction='mean'):
    """
    Compute reduced focal loss between target and output logits.

    Source https://github.com/BloodAxe/pytorch-toolbelt
    See :class:`~pytorch_toolbelt.losses` for details.

    Args:
        outputs: Tensor of arbitrary shape
        targets: Tensor of the same shape as input
        reduction (string, optional): Specifies the reduction to apply to
            the output: "none" | "mean" | "sum" | "batchwise_mean".
            "none": no reduction will be applied,
            "mean": the sum of the output will be divided by the number of
            elements in the output,
            "sum": the output will be summed.
            Note: :attr:`size_average` and :attr:`reduce` are in the process
            of being deprecated, and in the meantime, specifying either of
            those two args will override :attr:`reduction`.
            "batchwise_mean" computes mean loss per sample in batch.
            Default: "mean"

    See https://arxiv.org/abs/1903.01347
    """
    targets = targets.type(outputs.type())
    logpt = -F.binary_cross_entropy_with_logits(outputs, targets, reduction='none')
    pt = torch.exp(logpt)
    focal_reduction = ((1.0 - pt) / threshold).pow(gamma)
    focal_reduction[pt < threshold] = 1
    loss = -focal_reduction * logpt
    if reduction == 'mean':
        loss = loss.mean()
    if reduction == 'sum':
        loss = loss.sum()
    if reduction == 'batchwise_mean':
        loss = loss.sum(0)
    return loss


def sigmoid_focal_loss(outputs: 'torch.Tensor', targets: 'torch.Tensor',
        gamma: 'float'=2.0, alpha: 'float'=0.25, reduction: 'str'='mean'):
    """
    Compute binary focal loss between target and output logits.

    Source https://github.com/BloodAxe/pytorch-toolbelt
    See :class:`~pytorch_toolbelt.losses` for details.

    Args:
        outputs: Tensor of arbitrary shape
        targets: Tensor of the same shape as input
        reduction (string, optional): Specifies the reduction to apply to
            the output: "none" | "mean" | "sum" | "batchwise_mean".
            "none": no reduction will be applied,
            "mean": the sum of the output will be divided by the number of
            elements in the output,
            "sum": the output will be summed.

    See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/loss/losses.py  # noqa: E501
    """
    targets = targets.type(outputs.type())
    logpt = -F.binary_cross_entropy_with_logits(outputs, targets, reduction='none')
    pt = torch.exp(logpt)
    loss = -(1 - pt).pow(gamma) * logpt
    if alpha is not None:
        loss = loss * (alpha * targets + (1 - alpha) * (1 - targets))
    if reduction == 'mean':
        loss = loss.mean()
    if reduction == 'sum':
        loss = loss.sum()
    if reduction == 'batchwise_mean':
        loss = loss.sum(0)
    return loss


class FocalLossBinary(_Loss):

    def __init__(self, ignore: 'int'=None, reduced: 'bool'=False, gamma:
            'float'=2.0, alpha: 'float'=0.25, threshold: 'float'=0.5,
            reduction: 'str'='mean'):
        """
        Compute focal loss for binary classification problem.
        """
        super().__init__()
        self.ignore = ignore
        if reduced:
            self.loss_fn = partial(reduced_focal_loss, gamma=gamma,
                threshold=threshold, reduction=reduction)
        else:
            self.loss_fn = partial(sigmoid_focal_loss, gamma=gamma, alpha=
                alpha, reduction=reduction)

    def forward(self, logits, targets):
        """
        Args:
            logits: [bs; ...]
            targets: [bs; ...]
        """
        targets = targets.view(-1)
        logits = logits.view(-1)
        if self.ignore is not None:
            not_ignored = targets != self.ignore
            logits = logits[not_ignored]
            targets = targets[not_ignored]
        loss = self.loss_fn(logits, targets)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.jit
import torch.nn.functional as F
import torch.nn.functional
from functools import partial
from torch.nn.modules.loss import _Loss

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0(
        in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    tmp13 = -tmp12
    tmp14 = tl_math.exp(tmp13)
    tmp15 = tmp1 - tmp14
    tmp16 = tmp15 * tmp15
    tmp17 = -tmp16
    tmp18 = tmp17 * tmp13
    tmp19 = 0.25
    tmp20 = tmp0 * tmp19
    tmp21 = 0.75
    tmp22 = tmp2 * tmp21
    tmp23 = tmp20 + tmp22
    tmp24 = tmp18 * tmp23
    tmp25 = tl.broadcast_to(tmp24, [RBLOCK])
    tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0))
    tmp28 = 256.0
    tmp29 = tmp27 / tmp28
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0[
            grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduced_focal_loss(outputs: 'torch.Tensor', targets: 'torch.Tensor',
        threshold: 'float'=0.5, gamma: 'float'=2.0, reduction='mean'):
    """
    Compute reduced focal loss between target and output logits.

    Source https://github.com/BloodAxe/pytorch-toolbelt
    See :class:`~pytorch_toolbelt.losses` for details.

    Args:
        outputs: Tensor of arbitrary shape
        targets: Tensor of the same shape as input
        reduction (string, optional): Specifies the reduction to apply to
            the output: "none" | "mean" | "sum" | "batchwise_mean".
            "none": no reduction will be applied,
            "mean": the sum of the output will be divided by the number of
            elements in the output,
            "sum": the output will be summed.
            Note: :attr:`size_average` and :attr:`reduce` are in the process
            of being deprecated, and in the meantime, specifying either of
            those two args will override :attr:`reduction`.
            "batchwise_mean" computes mean loss per sample in batch.
            Default: "mean"

    See https://arxiv.org/abs/1903.01347
    """
    targets = targets.type(outputs.type())
    logpt = -F.binary_cross_entropy_with_logits(outputs, targets, reduction='none')
    pt = torch.exp(logpt)
    focal_reduction = ((1.0 - pt) / threshold).pow(gamma)
    focal_reduction[pt < threshold] = 1
    loss = -focal_reduction * logpt
    if reduction == 'mean':
        loss = loss.mean()
    if reduction == 'sum':
        loss = loss.sum()
    if reduction == 'batchwise_mean':
        loss = loss.sum(0)
    return loss


def sigmoid_focal_loss(outputs: 'torch.Tensor', targets: 'torch.Tensor',
        gamma: 'float'=2.0, alpha: 'float'=0.25, reduction: 'str'='mean'):
    """
    Compute binary focal loss between target and output logits.

    Source https://github.com/BloodAxe/pytorch-toolbelt
    See :class:`~pytorch_toolbelt.losses` for details.

    Args:
        outputs: Tensor of arbitrary shape
        targets: Tensor of the same shape as input
        reduction (string, optional): Specifies the reduction to apply to
            the output: "none" | "mean" | "sum" | "batchwise_mean".
            "none": no reduction will be applied,
            "mean": the sum of the output will be divided by the number of
            elements in the output,
            "sum": the output will be summed.

    See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/loss/losses.py  # noqa: E501
    """
    targets = targets.type(outputs.type())
    logpt = -F.binary_cross_entropy_with_logits(outputs, targets, reduction='none')
    pt = torch.exp(logpt)
    loss = -(1 - pt).pow(gamma) * logpt
    if alpha is not None:
        loss = loss * (alpha * targets + (1 - alpha) * (1 - targets))
    if reduction == 'mean':
        loss = loss.mean()
    if reduction == 'sum':
        loss = loss.sum()
    if reduction == 'batchwise_mean':
        loss = loss.sum(0)
    return loss


class FocalLossBinaryNew(_Loss):

    def __init__(self, ignore: 'int'=None, reduced: 'bool'=False, gamma:
            'float'=2.0, alpha: 'float'=0.25, threshold: 'float'=0.5,
            reduction: 'str'='mean'):
        """
        Compute focal loss for binary classification problem.
        """
        super().__init__()
        self.ignore = ignore
        if reduced:
            self.loss_fn = partial(reduced_focal_loss, gamma=gamma,
                threshold=threshold, reduction=reduction)
        else:
            self.loss_fn = partial(sigmoid_focal_loss, gamma=gamma, alpha=
                alpha, reduction=reduction)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
ZhongYingMatrix/nnUNet
FocalLossBinary
false
12,030
[ "Apache-2.0" ]
0
c3f028e79d4d5c3f2eb58396ffd0ae54048c132b
https://github.com/ZhongYingMatrix/nnUNet/tree/c3f028e79d4d5c3f2eb58396ffd0ae54048c132b
ArcMarginProduct
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
# Source node to ATen node mapping:
#   normalize => div
# Graph fragment:
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_9/inductor_cache/ce/cceitkb57gvjhg5ldpl5r7iv6yefxb7tczymuq3ptw4ifiklh2os.py
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
#   normalize_1 => div_1
# Graph fragment:
#   %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
        stream0 = get_raw_stream(0)
        triton_poi_fused_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
        triton_poi_fused_div_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cosine], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
        del buf1
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torchvision.transforms.functional as F
from torch import nn
from torch.nn import functional as F


class ArcMarginProduct(nn.Module):

    def __init__(self, in_features, out_features):
        super().__init__()
        self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features))
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, features):
        cosine = F.linear(F.normalize(features), F.normalize(self.weight))
        return cosine


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)


@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
        del buf1
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)


class ArcMarginProductNew(nn.Module):

    def __init__(self, in_features, out_features):
        super().__init__()
        self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features))
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, input_0):
        primals_2 = self.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
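For parameterized records the two modules initialize their weights independently, so a fair comparison copies state from one to the other first. Sketch under the same assumptions as above (CUDA device, this record's helpers):

# Hypothetical check; weights are shared via load_state_dict before comparing.
init_args, init_kwargs = get_init_inputs()
eager = ArcMarginProduct(*init_args, **init_kwargs).cuda()
fused = ArcMarginProductNew(*init_args, **init_kwargs).cuda()
fused.load_state_dict(eager.state_dict())
x = get_inputs()[0].cuda()
with torch.no_grad():
    assert torch.allclose(eager(x), fused(x), atol=1e-6)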
aaron276h/kaggle-rcic-1st
ArcMarginProduct
false
12,031
[ "MIT" ]
0
d35e97847df3c29f548e60bc936d3fec7a0a4c08
https://github.com/aaron276h/kaggle-rcic-1st/tree/d35e97847df3c29f548e60bc936d3fec7a0a4c08
LinearNet
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/s5/cs5s36nllf54w34e2o6wz5k5sm5uucj4lafe2tqol5j7l6dynqls.py
# Topologically Sorted Source Nodes: [x_act_1], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
#   x_act_1 => sigmoid
# Graph fragment:
#   %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_2,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 60
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 15
    x1 = (xindex // 15)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (17*x1)), xmask)
    tmp1 = tl.sigmoid(tmp0)
    tl.store(out_ptr0 + (x2), tmp1, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (17, 64), (64, 1))
    assert_size_stride(primals_3, (17, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 17), (17, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 17), (1, 64), 0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 15), (15, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_act_1], Original ATen: [aten.sigmoid]
        stream0 = get_raw_stream(0)
        triton_poi_fused_sigmoid_0.run(buf0, buf1, 60, grid=grid(60), stream=stream0)
    return (buf1, reinterpret_tensor(buf0, (4, ), (17, ), 16), reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((17, 64), (64, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((17, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class LinearNet(nn.Module):

    def __init__(self, board_width, board_height):
        super(LinearNet, self).__init__()
        self.board_width = board_width
        self.board_height = board_height
        self.model = nn.Linear(in_features=4 * self.board_width * self.board_height,
            out_features=self.board_width * self.board_height + 1)

    def forward(self, state_input):
        B = state_input.shape[0]
        x = state_input.reshape(B, -1)
        x = self.model(x)
        x_act, x_val = x[:, :-2], x[:, -1]
        x_act = F.sigmoid(x_act)
        return x_act, x_val


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'board_width': 4, 'board_height': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 60
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 15
    x1 = xindex // 15
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 17 * x1), xmask)
    tmp1 = tl.sigmoid(tmp0)
    tl.store(out_ptr0 + x2, tmp1, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (17, 64), (64, 1))
    assert_size_stride(primals_3, (17,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 17), (17, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0),
            reinterpret_tensor(primals_2, (64, 17), (1, 64), 0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 15), (15, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(60)](buf0, buf1, 60, XBLOCK=64, num_warps=1, num_stages=1)
    return buf1, reinterpret_tensor(buf0, (4,), (17,), 16), reinterpret_tensor(
        primals_1, (4, 64), (64, 1), 0), buf1


class LinearNetNew(nn.Module):

    def __init__(self, board_width, board_height):
        super(LinearNetNew, self).__init__()
        self.board_width = board_width
        self.board_height = board_height
        self.model = nn.Linear(in_features=4 * self.board_width * self.board_height,
            out_features=self.board_width * self.board_height + 1)

    def forward(self, input_0):
        primals_2 = self.model.weight
        primals_3 = self.model.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
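LinearNet returns a two-element tuple, and the generated wrapper mirrors that: buf1 is the sigmoid head, and the reinterpreted view of buf0 at offset 16 with stride 17 is exactly x[:, -1]. Note also that the 60-element sigmoid kernel (4x15) faithfully reproduces the source's `x[:, :-2]` slice, which drops logit 15 entirely. A sketch checking both heads, with the same CUDA and state-sharing caveats:

# Hypothetical check of both heads (policy and value) of the record.
init_args, init_kwargs = get_init_inputs()
eager = LinearNet(*init_args, **init_kwargs).cuda()
fused = LinearNetNew(*init_args, **init_kwargs).cuda()
fused.load_state_dict(eager.state_dict())
x = get_inputs()[0].cuda()
with torch.no_grad():
    ref_act, ref_val = eager(x)
    out_act, out_val = fused(x)
assert torch.allclose(ref_act, out_act) and torch.allclose(ref_val, out_val)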
ZiwenZhuang/AlphaZero_Gomoku
LinearNet
false
12,032
[ "MIT" ]
0
72db1c3eda1f6133da24c924da6032ea3569076e
https://github.com/ZiwenZhuang/AlphaZero_Gomoku/tree/72db1c3eda1f6133da24c924da6032ea3569076e
ScaleLayer
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/ft/cftyyxciwm3nmyaqzcpgsgwahlixgbpqkmrfx5nrib6smnhkored.py
# Topologically Sorted Source Nodes: [mul, scale, mul_1], Original ATen: [aten.mul, aten.abs]
# Source node to ATen node mapping:
#   mul => mul
#   mul_1 => mul_1
#   scale => abs_1
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %abs_1), kwargs = {})
triton_poi_fused_abs_mul_0 = async_compile.triton('triton_poi_fused_abs_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp5 = tl_math.abs(tmp4)
    tmp6 = tmp0 * tmp5
    tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1, ), (1, ))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul, scale, mul_1], Original ATen: [aten.mul, aten.abs]
        stream0 = get_raw_stream(0)
        triton_poi_fused_abs_mul_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
    return (buf0, primals_1, primals_2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch._utils class ScaleLayer(nn.Module): def __init__(self, init_value=1.0, lr_mult=1): super().__init__() self.lr_mult = lr_mult self.scale = nn.Parameter(torch.full((1,), init_value / lr_mult, dtype=torch.float32)) def forward(self, x): scale = torch.abs(self.scale * self.lr_mult) return x * scale def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = tmp0 * tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_mul_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf0, primals_1, primals_2 class ScaleLayerNew(nn.Module): def __init__(self, init_value=1.0, lr_mult=1): super().__init__() self.lr_mult = lr_mult self.scale = nn.Parameter(torch.full((1,), init_value / lr_mult, dtype=torch.float32)) def forward(self, input_0): primals_1 = self.scale primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
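# --- Not part of the original repo dump: a minimal equivalence sketch. ---
# Assumes a CUDA device and that the ScaleLayer / ScaleLayerNew classes from
# the two entries above are in scope. Both modules initialise `scale` to the
# same deterministic value (init_value / lr_mult = 1.0), and both compute
# x * |scale * lr_mult|, so their outputs should agree to float precision.
import torch

torch.manual_seed(0)
x = torch.rand(4, 4, 4, 4, device='cuda')
eager = ScaleLayer().cuda()
compiled = ScaleLayerNew().cuda()
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-6)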
aagaard/ritm_interactive_segmentation
ScaleLayer
false
12033
[ "MIT" ]
0
c68b45a54e99eb5401f50e62f7e43a11e34964ee
https://github.com/aagaard/ritm_interactive_segmentation/tree/c68b45a54e99eb5401f50e62f7e43a11e34964ee
SoftIoU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/qv/cqvwf3g7kgalrnapvqdkoeliul5ujg3hpph3o4r4tfioorwjt4dy.py # Topologically Sorted Source Nodes: [pred, mul, sample_weight, mul_1, sum_1, max_1, mul_2, sum_2, add, truediv, loss], Original ATen: [aten.sigmoid, aten.mul, aten.ne, aten.sum, aten.maximum, aten.add, aten.div, aten.rsub] # Source node to ATen node mapping: # add => add # loss => sub # max_1 => maximum # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pred => sigmoid # sample_weight => ne # sum_1 => sum_1 # sum_2 => sum_2 # truediv => div # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg1_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg0_1), kwargs = {}) # %ne : [num_users=2] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg0_1, -1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %ne), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1, 2, 3]), kwargs = {}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%sigmoid, %arg0_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%maximum, %ne), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1, 2, 3]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {}) triton_per_fused_add_div_maximum_mul_ne_rsub_sigmoid_sum_0 = async_compile.triton('triton_per_fused_add_div_maximum_mul_ne_rsub_sigmoid_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_maximum_mul_ne_rsub_sigmoid_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_maximum_mul_ne_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = -1.0 tmp5 = tmp2 != tmp4 tmp6 = tmp5.to(tl.float32) tmp7 = tmp3 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = triton_helpers.maximum(tmp1, tmp2) tmp13 = tmp12 * tmp6 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = 1e-08 tmp19 = tmp17 + tmp18 tmp20 = tmp11 / tmp19 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [pred, mul, sample_weight, mul_1, sum_1, max_1, mul_2, sum_2, add, truediv, loss], Original ATen: [aten.sigmoid, aten.mul, aten.ne, aten.sum, aten.maximum, aten.add, aten.div, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_add_div_maximum_mul_ne_rsub_sigmoid_sum_0.run(buf2, arg1_1, arg0_1, 4, 64, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch._utils class SoftIoU(nn.Module): def __init__(self, from_sigmoid=False, ignore_label=-1): super().__init__() self._from_sigmoid = from_sigmoid self._ignore_label = ignore_label def forward(self, pred, label): label = label.view(pred.size()) sample_weight = label != self._ignore_label if not self._from_sigmoid: pred = torch.sigmoid(pred) loss = 1.0 - torch.sum(pred * label * sample_weight, dim=(1, 2, 3)) / ( torch.sum(torch.max(pred, label) * sample_weight, dim=(1, 2, 3) ) + 1e-08) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
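# --- Not part of the original repo dump: a deterministic sanity check. ---
# With zero logits, sigmoid gives 0.5 everywhere; on a 2x2 map with two
# positive pixels the intersection sum is 2 * 0.5 = 1.0 and the union sum is
# 2 * 1.0 + 2 * 0.5 = 3.0, so the soft IoU loss is 1 - 1/3 (up to the 1e-08
# stabiliser in the denominator).
import torch

loss_fn = SoftIoU()
logits = torch.zeros(1, 1, 2, 2)
label = torch.tensor([[[[1.0, 0.0], [0.0, 1.0]]]])
loss = loss_fn(logits, label)
assert torch.allclose(loss, torch.tensor([1.0 - 1.0 / 3.0]), atol=1e-6)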
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_maximum_mul_ne_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = -1.0 tmp5 = tmp2 != tmp4 tmp6 = tmp5.to(tl.float32) tmp7 = tmp3 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = triton_helpers.maximum(tmp1, tmp2) tmp13 = tmp12 * tmp6 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = 1e-08 tmp19 = tmp17 + tmp18 tmp20 = tmp11 / tmp19 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp22, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_maximum_mul_ne_rsub_sigmoid_sum_0[grid(4)]( buf2, arg1_1, arg0_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class SoftIoUNew(nn.Module): def __init__(self, from_sigmoid=False, ignore_label=-1): super().__init__() self._from_sigmoid = from_sigmoid self._ignore_label = ignore_label def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
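# --- Not part of the original repo dump: an indexing note on the kernel. ---
# The persistent reduction above launches grid(4) programs, one per batch
# element (xnumel = 4); each program reduces its entire 4*4*4 = 64-element
# slice in a single RBLOCK pass. The flat offset `r1 + 64 * x0` used by the
# tl.load calls is plain row-major addressing of a contiguous (4, 4, 4, 4)
# tensor viewed as (4, 64):
import torch

t = torch.arange(256.0).reshape(4, 4, 4, 4)
x0, r1 = 2, 17
assert t.reshape(4, 64)[x0, r1] == t.flatten()[r1 + 64 * x0]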
aagaard/ritm_interactive_segmentation
SoftIoU
false
12034
[ "MIT" ]
0
c68b45a54e99eb5401f50e62f7e43a11e34964ee
https://github.com/aagaard/ritm_interactive_segmentation/tree/c68b45a54e99eb5401f50e62f7e43a11e34964ee
PatchMerging
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/2j/c2jskczly24ilkcdwwjvkvq74mjuqo2ltaw546orkjkjvbgvgeei.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.cat, aten.native_layer_norm] # Source node to ATen node mapping: # x => cat # x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_4, %slice_9, %slice_14, %slice_19], -1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%cat, [4]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_per_fused_cat_native_layer_norm_0 = async_compile.triton('triton_per_fused_cat_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_native_layer_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cat_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 2 x1 = (xindex // 2) x3 = xindex tmp46 = tl.load(in_ptr1 + (r2), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (r2), None, eviction_policy='evict_last') tmp0 = r2 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((8*x0) + (32*x1) + r2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1, 1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + (8*x0) + (32*x1) + ((-4) + r2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1, 1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 + (8*x0) + (32*x1) + ((-8) + r2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1, 1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (20 + (8*x0) + (32*x1) + ((-12) + r2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tl.full([XBLOCK, 1], 16, tl.int32) tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 / tmp31 tmp33 = tmp23 - tmp32 tmp34 = tmp33 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = tl.where(xmask, tmp35, 0) tmp38 = tl.sum(tmp37, 1)[:, None] tmp39 = 16.0 tmp40 = tmp38 / tmp39 tmp41 = 1e-05 tmp42 = tmp40 + tmp41 tmp43 = libdevice.rsqrt(tmp42) tmp44 = tmp22 - tmp32 tmp45 = tmp44 * tmp43 tmp47 = tmp45 * tmp46 tmp49 = tmp47 + tmp48 tl.store(out_ptr0 + (r2 + (16*x3)), tmp22, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + (x3), tmp43, xmask) tl.store(out_ptr2 + (r2 + (16*x3)), tmp49, xmask) tl.store(out_ptr1 + (x3), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (8, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2, 16), (256, 64, 
32, 16, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 2, 2, 1), (16, 4, 2, 1, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 2, 2, 1), (16, 4, 2, 1, 64), torch.float32) buf4 = reinterpret_tensor(buf2, (4, 4, 2, 2, 1), (16, 4, 2, 1, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 2, 2, 16), (256, 64, 32, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.cat, aten.native_layer_norm] stream0 = get_raw_stream(0) triton_per_fused_cat_native_layer_norm_0.run(buf4, primals_1, primals_2, primals_3, buf0, buf1, buf5, 64, 16, grid=grid(64), stream=stream0) del primals_1 del primals_2 del primals_3 buf6 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf5, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 8), (1, 16), 0), out=buf6) return (reinterpret_tensor(buf6, (4, 4, 2, 2, 8), (128, 32, 16, 8, 1), 0), buf0, buf1, buf4, reinterpret_tensor(buf5, (64, 16), (16, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 16), (16, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class PatchMerging(nn.Module): """ Patch Merging Layer Args: dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, dim, norm_layer=nn.LayerNorm): super().__init__() self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def forward(self, x): """ Forward function. Args: x: Input feature, tensor size (B, D, H, W, C). """ _B, _D, H, W, _C = x.shape pad_input = H % 2 == 1 or W % 2 == 1 if pad_input: x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) x0 = x[:, :, 0::2, 0::2, :] x1 = x[:, :, 1::2, 0::2, :] x2 = x[:, :, 0::2, 1::2, :] x3 = x[:, :, 1::2, 1::2, :] x = torch.cat([x0, x1, x2, x3], -1) x = self.norm(x) x = self.reduction(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
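# --- Not part of the original repo dump: a shape walkthrough sketch. ---
# With dim=4 (as in get_init_inputs): the four strided slices each halve H
# and W, the concat stacks them on the channel axis (C -> 4C = 16), and the
# linear reduction maps 4C -> 2C = 8.
import torch

merge = PatchMerging(dim=4)
x = torch.rand(4, 4, 4, 4, 4)        # (B, D, H, W, C)
out = merge(x)
assert out.shape == (4, 4, 2, 2, 8)  # (B, D, H/2, W/2, 2*C)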
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_cat_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 2 x1 = xindex // 2 x3 = xindex tmp46 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp0 = r2 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (8 * x0 + 32 * x1 + r2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1, 1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + 8 * x0 + 32 * x1 + (-4 + r2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1, 1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 + 8 * x0 + 32 * x1 + (-8 + r2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1, 1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (20 + 8 * x0 + 32 * x1 + (-12 + r2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tl.full([XBLOCK, 1], 16, tl.int32) tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 / tmp31 tmp33 = tmp23 - tmp32 tmp34 = tmp33 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = tl.where(xmask, tmp35, 0) tmp38 = tl.sum(tmp37, 1)[:, None] tmp39 = 16.0 tmp40 = tmp38 / tmp39 tmp41 = 1e-05 tmp42 = tmp40 + tmp41 tmp43 = libdevice.rsqrt(tmp42) tmp44 = tmp22 - tmp32 tmp45 = tmp44 * tmp43 tmp47 = tmp45 * tmp46 tmp49 = tmp47 + tmp48 tl.store(out_ptr0 + (r2 + 16 * x3), tmp22, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp43, xmask) tl.store(out_ptr2 + (r2 + 16 * x3), tmp49, xmask) tl.store(out_ptr1 + x3, tmp32, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (8, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2, 16), (256, 64, 32, 16, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 2, 2, 1), (16, 4, 2, 1, 1), torch. 
float32) buf2 = empty_strided_cuda((4, 4, 2, 2, 1), (16, 4, 2, 1, 64), torch .float32) buf4 = reinterpret_tensor(buf2, (4, 4, 2, 2, 1), (16, 4, 2, 1, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 2, 2, 16), (256, 64, 32, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused_cat_native_layer_norm_0[grid(64)](buf4, primals_1, primals_2, primals_3, buf0, buf1, buf5, 64, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 del primals_3 buf6 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 8), (1, 16), 0), out=buf6) return reinterpret_tensor(buf6, (4, 4, 2, 2, 8), (128, 32, 16, 8, 1), 0 ), buf0, buf1, buf4, reinterpret_tensor(buf5, (64, 16), (16, 1), 0 ), primals_4 class PatchMergingNew(nn.Module): """ Patch Merging Layer Args: dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, dim, norm_layer=nn.LayerNorm): super().__init__() self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def forward(self, input_0): primals_4 = self.reduction.weight primals_2 = self.norm.weight primals_3 = self.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
acewjh/Video-Swin-Transformer
PatchMerging
false
12035
[ "Apache-2.0" ]
0
bfbc8dde12e991455b34b921ca45a978b4dbfdbc
https://github.com/acewjh/Video-Swin-Transformer/tree/bfbc8dde12e991455b34b921ca45a978b4dbfdbc
IrisClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/nu/cnuuaznpt4szfn74bn46qfjkdypvlkfa5x44ywjpperdjt2a66rj.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex 
% 10 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (10, 10), (10, 1)) assert_size_stride(primals_5, (10, ), (1, )) assert_size_stride(primals_6, (3, 10), (10, 1)) assert_size_stride(primals_7, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0); del buf0 # reuse buf9 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 640, grid=grid(640), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 10), (1, 10), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 10), (160, 40, 10, 1), 0); del buf2 # reuse buf8 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf8, 640, grid=grid(640), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.native_dropout] buf4 = torch.ops.aten.native_dropout.default(buf3, 0.2, True) del buf3 buf5 = buf4[0] buf6 = buf4[1] del buf4 buf7 = empty_strided_cuda((64, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 10), (10, 1), 0), reinterpret_tensor(primals_6, (10, 3), (1, 10), 0), alpha=1, beta=1, out=buf7) del primals_7 return (reinterpret_tensor(buf7, (4, 4, 4, 3), (48, 12, 3, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 10), (10, 1), 0), buf6, reinterpret_tensor(buf5, (64, 10), (10, 1), 0), primals_6, buf8, primals_4, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((10, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((10, 10), (10, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((3, 10), (10, 
1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx class IrisClassifier(nn.Module): def __init__(self): super(IrisClassifier, self).__init__() self.fc1 = nn.Linear(4, 10) self.fc2 = nn.Linear(10, 10) self.fc3 = nn.Linear(10, 3) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.dropout(x, 0.2) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
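# --- Not part of the original repo dump: a caveat on this forward pass. ---
# F.dropout's `training` flag defaults to True, so the module as written
# applies dropout even under model.eval(); threading the module state
# through (F.dropout(x, 0.2, training=self.training)) would make eval
# deterministic. A quick demonstration of the stochasticity:
import torch

torch.manual_seed(0)
model = IrisClassifier().eval()
x = torch.rand(4, 4, 4, 4)
assert not torch.equal(model(x), model(x))  # dropout still active in eval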
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (10, 10), (10, 1)) assert_size_stride(primals_5, (10,), (1,)) assert_size_stride(primals_6, (3, 10), (10, 1)) assert_size_stride(primals_7, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(640)](buf1, primals_2, buf9, 640, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 10), (1, 10), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(640)](buf3, primals_5, buf8, 640, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = torch.ops.aten.native_dropout.default(buf3, 0.2, True) del buf3 buf5 = buf4[0] buf6 = buf4[1] del buf4 buf7 = empty_strided_cuda((64, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 10), (10, 1), 0), reinterpret_tensor(primals_6, (10, 3), (1, 10), 0), alpha=1, beta=1, out=buf7) del primals_7 return reinterpret_tensor(buf7, (4, 4, 4, 3), (48, 12, 3, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 10), (10, 1), 0 ), buf6, reinterpret_tensor(buf5, (64, 10), (10, 1), 0 ), primals_6, buf8, primals_4, buf9 class IrisClassifierNew(nn.Module): def __init__(self): super(IrisClassifierNew, self).__init__() self.fc1 = nn.Linear(4, 10) self.fc2 = nn.Linear(10, 10) self.fc3 = nn.Linear(10, 3) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias 
primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
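# --- Not part of the original repo dump: a smoke-test sketch (needs CUDA). ---
# The compiled graph bakes in native_dropout(..., 0.2, True), i.e. a fresh
# random mask on every call, so an exact value comparison with the eager
# module is not meaningful; checking the output shape is still useful.
import torch

model = IrisClassifierNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert model(x).shape == (4, 4, 4, 3)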
abhinavthomas/mlflow
IrisClassifier
false
12036
[ "Apache-2.0" ]
0
1942d788e98e565229615373b4fd6c0899b4026b
https://github.com/abhinavthomas/mlflow/tree/1942d788e98e565229615373b4fd6c0899b4026b
MaskedLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/rt/crtmixcsfkaxel6ijhy4jbyokf7hyglahuarpln3ayfqwzdcmlxu.py # Topologically Sorted Source Nodes: [sub, diff, getitem, square, mean], Original ATen: [aten.sub, aten.div, aten.index, aten.pow, aten.mean] # Source node to ATen node mapping: # diff => div # getitem => index # mean => mean # square => pow_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 5.0), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div, [%arg2_1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%index, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) triton_per_fused_div_index_mean_pow_sub_0 = async_compile.triton('triton_per_fused_div_index_mean_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_index_mean_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_index_mean_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = (rindex // 64) r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last') tmp1 = tl.full([RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (r0 + (64*tmp4)), None) tmp7 = tl.load(in_ptr2 + (r0 + (64*tmp4)), None) tmp8 = tmp6 - tmp7 tmp9 = 0.2 tmp10 = tmp8 * tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, diff, getitem, square, mean], Original ATen: [aten.sub, aten.div, aten.index, aten.pow, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_div_index_mean_pow_sub_0.run(buf1, arg2_1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class MaskedLoss(torch.nn.Module): def __init__(self): super().__init__() def forward(self, y, Y, mask): diff = (y - Y) / 5.0 return torch.mean(torch.square(diff[mask])) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.ones( [4], dtype=torch.int64)] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_index_mean_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex // 64 r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp1 = tl.full([RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (r0 + 64 * tmp4), None) tmp7 = tl.load(in_ptr2 + (r0 + 64 * tmp4), None) tmp8 = tmp6 - tmp7 tmp9 = 0.2 tmp10 = tmp8 * tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_index_mean_pow_sub_0[grid(1)](buf1, arg2_1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf1, class MaskedLossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
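# --- Not part of the original repo dump: a worked example. ---
# Note that `mask` in this entry is an int64 index tensor (see get_inputs),
# not a bool mask: diff[mask] gathers rows along dim 0, which is exactly the
# aten.index op the kernel lowers to. With y - Y == -1 everywhere, diff is
# -0.2 and the loss is 0.2**2 = 0.04 no matter which rows the indices pick.
import torch

loss_fn = MaskedLoss()
y = torch.zeros(4, 4, 4, 4)
Y = torch.ones(4, 4, 4, 4)
mask = torch.ones(4, dtype=torch.int64)  # selects row 1, four times
assert torch.allclose(loss_fn(y, Y, mask), torch.tensor(0.04))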
acycliq/cellpose
MaskedLoss
false
12037
[ "BSD-3-Clause" ]
0
6d7a3f692206bf791e3ea7bd9524ee6df628ed8a
https://github.com/acycliq/cellpose/tree/6d7a3f692206bf791e3ea7bd9524ee6df628ed8a
DenseCrossEntropy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py # Topologically Sorted Source Nodes: [logprobs], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # logprobs => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = 
tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/nb/cnbdxy34iv6vkig4bfuqrxbegug3ek6lhyugevz3qctt7efdvtge.py # Topologically Sorted Source Nodes: [logprobs, neg, loss, loss_1, mean], Original ATen: [aten._log_softmax, aten.neg, aten.mul, aten.sum, aten.mean] # Source node to ATen node mapping: # logprobs => exp, log, sub_1, sum_1 # loss => mul # loss_1 => sum_2 # mean => mean # neg => neg # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg1_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {}) triton_per_fused__log_softmax_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: 
tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = -tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp2 - tmp11 tmp17 = -tmp16 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tmp21 = tmp5 - tmp11 tmp22 = -tmp21 tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp8 - tmp11 tmp27 = -tmp26 tmp29 = tmp27 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = 64.0 tmp35 = tmp33 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp35, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logprobs], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [logprobs, neg, loss, loss_1, mean], Original ATen: [aten._log_softmax, aten.neg, aten.mul, aten.sum, aten.mean] triton_per_fused__log_softmax_mean_mul_neg_sum_1.run(buf3, buf0, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg1_1 del buf0 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class DenseCrossEntropy(nn.Module):

    def forward(self, x, target):
        x = x.float()
        target = target.float()
        logprobs = torch.nn.functional.log_softmax(x, dim=-1)
        loss = -logprobs * target
        loss = loss.sum(-1)
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
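Since DenseCrossEntropy accepts a full probability distribution per row, it reduces to standard hard-label cross-entropy when the target is one-hot. A quick self-check sketch (the shapes are illustrative, not from the repository):

import torch
import torch.nn.functional as F

crit = DenseCrossEntropy()
logits = torch.randn(8, 4)
labels = torch.randint(0, 4, (8,))
one_hot = F.one_hot(labels, num_classes=4).float()

# With one-hot targets, -(log_softmax * target).sum(-1) picks out the
# log-probability of the true class, i.e. ordinary cross-entropy.
assert torch.allclose(crit(logits, one_hot), F.cross_entropy(logits, labels), atol=1e-6)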
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = -tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp2 - tmp11 tmp17 = -tmp16 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tmp21 = tmp5 - tmp11 tmp22 = -tmp21 tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp8 - tmp11 tmp27 = -tmp26 tmp29 = tmp27 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = 64.0 tmp35 = tmp33 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3, buf0, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf3, class DenseCrossEntropyNew(nn.Module): def 
forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
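A hedged parity check between the eager module and the compiled wrapper; it assumes both classes are importable in one script and requires CUDA, since call() allocates device buffers and launches Triton kernels:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    t = torch.rand(4, 4, 4, 4, device='cuda')
    eager = DenseCrossEntropy()(x, t)
    compiled = DenseCrossEntropyNew()(x, t)
    # Both should produce the same scalar up to float32 rounding.
    assert torch.allclose(eager, compiled, atol=1e-5)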
aaron276h/kaggle-rcic-1st
DenseCrossEntropy
false
12038
[ "MIT" ]
0
d35e97847df3c29f548e60bc936d3fec7a0a4c08
https://github.com/aaron276h/kaggle-rcic-1st/tree/d35e97847df3c29f548e60bc936d3fec7a0a4c08
L1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/i5/ci5r22vnwphjxav3oibgww4fkm25q4egp3rofzniyjru2u4b563f.py # Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul] # Source node to ATen node mapping: # loss => abs_1, mean, sub # mul => mul # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_abs_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_mean_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class L1Loss(nn.Module):
    """L1 loss."""

    def __init__(self, use_target_weight=False, loss_weight=1.0):
        super().__init__()
        self.criterion = F.l1_loss
        self.use_target_weight = use_target_weight
        self.loss_weight = loss_weight

    def forward(self, output, target, target_weight=None):
        """Forward function.

        Note:
            batch_size: N
            num_keypoints: K

        Args:
            output (torch.Tensor[N, K, 2]): Output regression.
            target (torch.Tensor[N, K, 2]): Target regression.
            target_weight (torch.Tensor[N, K, 2]):
                Weights across different joint types.
        """
        if self.use_target_weight:
            assert target_weight is not None
            loss = self.criterion(output * target_weight, target *
                target_weight)
        else:
            loss = self.criterion(output, target)
        return loss * self.loss_weight


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
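The use_target_weight branch simply scales both prediction and target before the element-wise L1. A short usage sketch with made-up keypoint shapes:

import torch

loss_fn = L1Loss(use_target_weight=True, loss_weight=2.0)
output = torch.rand(4, 17, 2)          # N=4 samples, K=17 keypoints
target = torch.rand(4, 17, 2)
weight = torch.rand(4, 17, 2)

loss = loss_fn(output, target, target_weight=weight)
# F.l1_loss defaults to reduction='mean', so this equals the manual form:
manual = (output * weight - target * weight).abs().mean() * 2.0
assert torch.allclose(loss, manual)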
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L1LossNew(nn.Module): """L1Loss loss .""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.l1_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
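A hedged parity sketch for this record (CUDA only; assumes both classes are importable in one script). Note the fused kernel computes target - output before the abs, but absolute difference is symmetric, so it matches the eager order of arguments:

import torch

if torch.cuda.is_available():
    out = torch.rand(4, 4, 4, 4, device='cuda')
    tgt = torch.rand(4, 4, 4, 4, device='cuda')
    # |target - output| == |output - target|, so eager and compiled agree.
    assert torch.allclose(L1Loss()(out, tgt), L1LossNew()(out, tgt), atol=1e-6)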
ZephyrII/mmpose_charger
L1Loss
false
12039
[ "Apache-2.0" ]
0
ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
https://github.com/ZephyrII/mmpose_charger/tree/ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
ArcFaceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/d4/cd4nn236j2iacn6ceojlrv7a3i4iyguhy6f5thudh5pspewffmz6.py # Topologically Sorted Source Nodes: [gt, mul, pow_1, sub, sine, mul_1, phi, sub_2, phi_1, mul_2, sub_3, mul_3, output, logprobs], Original ATen: [aten.gt, aten.mul, aten.pow, aten.rsub, aten.sqrt, aten.sub, aten.where, aten.add, aten._log_softmax] # Source node to ATen node mapping: # gt => gt # logprobs => exp, sum_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # output => add # phi => sub_1 # phi_1 => where # pow_1 => pow_1 # sine => sqrt # sub => sub # sub_2 => sub_2 # sub_3 => sub_3 # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, -0.8775825618903726), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.8775825618903728), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %pow_1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sub,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, 0.479425538604203), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0.23971276930210156), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %sub_1, %sub_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %where), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %arg0_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1), kwargs = {}) # %amax_default : 
[num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 30.0), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__log_softmax_add_gt_mul_pow_rsub_sqrt_sub_where_0 = async_compile.triton('triton_poi_fused__log_softmax_add_gt_mul_pow_rsub_sqrt_sub_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_gt_mul_pow_rsub_sqrt_sub_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_add_gt_mul_pow_rsub_sqrt_sub_where_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp55 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = -0.8775825618903726 tmp3 = tmp1 > tmp2 tmp4 = 0.8775825618903728 tmp5 = tmp1 * tmp4 tmp6 = tmp1 * tmp1 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp9 = libdevice.sqrt(tmp8) tmp10 = 0.479425538604203 tmp11 = tmp9 * tmp10 tmp12 = tmp5 - tmp11 tmp13 = 0.23971276930210156 tmp14 = tmp1 - tmp13 tmp15 = tl.where(tmp3, tmp12, tmp14) tmp16 = tmp0 * tmp15 tmp17 = tmp7 - tmp0 tmp18 = tmp17 * tmp1 tmp19 = tmp16 + tmp18 tmp20 = tmp19 * tmp7 tmp23 = tmp22 > tmp2 tmp24 = tmp22 
* tmp4 tmp25 = tmp22 * tmp22 tmp26 = tmp7 - tmp25 tmp27 = libdevice.sqrt(tmp26) tmp28 = tmp27 * tmp10 tmp29 = tmp24 - tmp28 tmp30 = tmp22 - tmp13 tmp31 = tl.where(tmp23, tmp29, tmp30) tmp32 = tmp21 * tmp31 tmp33 = tmp7 - tmp21 tmp34 = tmp33 * tmp22 tmp35 = tmp32 + tmp34 tmp36 = tmp35 * tmp7 tmp37 = triton_helpers.maximum(tmp20, tmp36) tmp40 = tmp39 > tmp2 tmp41 = tmp39 * tmp4 tmp42 = tmp39 * tmp39 tmp43 = tmp7 - tmp42 tmp44 = libdevice.sqrt(tmp43) tmp45 = tmp44 * tmp10 tmp46 = tmp41 - tmp45 tmp47 = tmp39 - tmp13 tmp48 = tl.where(tmp40, tmp46, tmp47) tmp49 = tmp38 * tmp48 tmp50 = tmp7 - tmp38 tmp51 = tmp50 * tmp39 tmp52 = tmp49 + tmp51 tmp53 = tmp52 * tmp7 tmp54 = triton_helpers.maximum(tmp37, tmp53) tmp57 = tmp56 > tmp2 tmp58 = tmp56 * tmp4 tmp59 = tmp56 * tmp56 tmp60 = tmp7 - tmp59 tmp61 = libdevice.sqrt(tmp60) tmp62 = tmp61 * tmp10 tmp63 = tmp58 - tmp62 tmp64 = tmp56 - tmp13 tmp65 = tl.where(tmp57, tmp63, tmp64) tmp66 = tmp55 * tmp65 tmp67 = tmp7 - tmp55 tmp68 = tmp67 * tmp56 tmp69 = tmp66 + tmp68 tmp70 = tmp69 * tmp7 tmp71 = triton_helpers.maximum(tmp54, tmp70) tmp72 = tmp20 - tmp71 tmp73 = 30.0 tmp74 = tmp72 * tmp73 tmp75 = tl_math.exp(tmp74) tmp76 = tmp36 - tmp71 tmp77 = tmp76 * tmp73 tmp78 = tl_math.exp(tmp77) tmp79 = tmp75 + tmp78 tmp80 = tmp53 - tmp71 tmp81 = tmp80 * tmp73 tmp82 = tl_math.exp(tmp81) tmp83 = tmp79 + tmp82 tmp84 = tmp70 - tmp71 tmp85 = tmp84 * tmp73 tmp86 = tl_math.exp(tmp85) tmp87 = tmp83 + tmp86 tl.store(out_ptr0 + (x0), tmp71, xmask) tl.store(out_ptr1 + (x0), tmp87, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7p/c7pnycif2hdh2j6dm2ov3e34xtzilxnepnaqyhbcnxfhqyxqgf7u.py # Topologically Sorted Source Nodes: [gt, mul, pow_1, sub, sine, mul_1, phi, sub_2, phi_1, mul_2, sub_3, mul_3, output, logprobs, neg, loss], Original ATen: [aten.gt, aten.mul, aten.pow, aten.rsub, aten.sqrt, aten.sub, aten.where, aten.add, aten._log_softmax, aten.neg] # Source node to ATen node mapping: # gt => gt # logprobs => log, sub_5 # loss => mul_5 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # neg => neg # output => add # phi => sub_1 # phi_1 => where # pow_1 => pow_1 # sine => sqrt # sub => sub # sub_2 => sub_2 # sub_3 => sub_3 # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, -0.8775825618903726), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.8775825618903728), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %pow_1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sub,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, 0.479425538604203), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0.23971276930210156), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %sub_1, %sub_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %where), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {}) # %mul_3 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %arg0_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 30.0), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %log), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_5,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg1_1), kwargs = {}) triton_poi_fused__log_softmax_add_gt_mul_neg_pow_rsub_sqrt_sub_where_1 = async_compile.triton('triton_poi_fused__log_softmax_add_gt_mul_neg_pow_rsub_sqrt_sub_where_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_gt_mul_neg_pow_rsub_sqrt_sub_where_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_add_gt_mul_neg_pow_rsub_sqrt_sub_where_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp21 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = -0.8775825618903726 tmp3 = tmp1 > tmp2 tmp4 = 0.8775825618903728 tmp5 = tmp1 * tmp4 tmp6 = tmp1 * tmp1 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp9 = libdevice.sqrt(tmp8) tmp10 = 0.479425538604203 tmp11 = tmp9 * tmp10 tmp12 = tmp5 - tmp11 tmp13 = 0.23971276930210156 tmp14 = tmp1 - tmp13 tmp15 = tl.where(tmp3, tmp12, tmp14) tmp16 = tmp0 * tmp15 tmp17 = tmp7 - tmp0 tmp18 = tmp17 * tmp1 
tmp19 = tmp16 + tmp18 tmp20 = tmp19 * tmp7 tmp22 = tmp20 - tmp21 tmp23 = 30.0 tmp24 = tmp22 * tmp23 tmp26 = tl_math.log(tmp25) tmp27 = tmp24 - tmp26 tmp28 = -tmp27 tmp29 = tmp28 * tmp0 tl.store(out_ptr0 + (x2), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ws/cwsssktu52blpd3xccw4jscrdh35mswkecob5i6wbmoj6bffiyeb.py # Topologically Sorted Source Nodes: [loss_1, loss_2, truediv], Original ATen: [aten.sum, aten.mean, aten.div] # Source node to ATen node mapping: # loss_1 => sum_2 # loss_2 => mean # truediv => div # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [-1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mean, 2), kwargs = {}) triton_per_fused_div_mean_sum_2 = async_compile.triton('triton_per_fused_div_mean_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mean_sum_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 64.0 tmp11 = tmp9 / tmp10 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() 
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [gt, mul, pow_1, sub, sine, mul_1, phi, sub_2, phi_1, mul_2, sub_3, mul_3, output, logprobs], Original ATen: [aten.gt, aten.mul, aten.pow, aten.rsub, aten.sqrt, aten.sub, aten.where, aten.add, aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_add_gt_mul_pow_rsub_sqrt_sub_where_0.run(arg1_1, arg0_1, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [gt, mul, pow_1, sub, sine, mul_1, phi, sub_2, phi_1, mul_2, sub_3, mul_3, output, logprobs, neg, loss], Original ATen: [aten.gt, aten.mul, aten.pow, aten.rsub, aten.sqrt, aten.sub, aten.where, aten.add, aten._log_softmax, aten.neg] triton_poi_fused__log_softmax_add_gt_mul_neg_pow_rsub_sqrt_sub_where_1.run(arg1_1, arg0_1, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 del buf0 del buf1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [loss_1, loss_2, truediv], Original ATen: [aten.sum, aten.mean, aten.div] triton_per_fused_div_mean_sum_2.run(buf4, buf2, 1, 64, grid=grid(1), stream=stream0) del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
from torch import nn


class DenseCrossEntropy(nn.Module):

    def forward(self, x, target):
        x = x.float()
        target = target.float()
        logprobs = torch.nn.functional.log_softmax(x, dim=-1)
        loss = -logprobs * target
        loss = loss.sum(-1)
        return loss.mean()


class ArcFaceLoss(nn.modules.Module):

    def __init__(self, s=30.0, m=0.5):
        super().__init__()
        self.crit = DenseCrossEntropy()
        self.s = s
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        self.th = math.cos(math.pi - m)
        self.mm = math.sin(math.pi - m) * m

    def forward(self, logits, labels):
        logits = logits.float()
        cosine = logits
        sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
        phi = cosine * self.cos_m - sine * self.sin_m
        phi = torch.where(cosine > self.th, phi, cosine - self.mm)
        output = labels * phi + (1.0 - labels) * cosine
        output *= self.s
        loss = self.crit(output, labels)
        return loss / 2


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
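The cos_m/sin_m pair implements the angle-addition identity: for cosine = cos(theta) with sine >= 0, phi = cos(theta)cos(m) - sin(theta)sin(m) = cos(theta + m). The torch.where fallback (cosine - self.mm) keeps the margined logit monotone once theta + m would exceed pi. A small numeric check of the identity (values are illustrative):

import math
import torch

m = 0.5
cosine = torch.linspace(-0.99, 0.99, 9)
sine = torch.sqrt(1.0 - cosine ** 2)   # theta in [0, pi], so sine >= 0
phi = cosine * math.cos(m) - sine * math.sin(m)

theta = torch.acos(cosine)
assert torch.allclose(phi, torch.cos(theta + m), atol=1e-6)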
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_add_gt_mul_pow_rsub_sqrt_sub_where_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp38 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp39 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp55 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp56 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = -0.8775825618903726 tmp3 = tmp1 > tmp2 tmp4 = 0.8775825618903728 tmp5 = tmp1 * tmp4 tmp6 = tmp1 * tmp1 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp9 = libdevice.sqrt(tmp8) tmp10 = 0.479425538604203 tmp11 = tmp9 * tmp10 tmp12 = tmp5 - tmp11 tmp13 = 0.23971276930210156 tmp14 = tmp1 - tmp13 tmp15 = tl.where(tmp3, tmp12, tmp14) tmp16 = tmp0 * tmp15 tmp17 = tmp7 - tmp0 tmp18 = tmp17 * tmp1 tmp19 = tmp16 + tmp18 tmp20 = tmp19 * tmp7 tmp23 = tmp22 > tmp2 tmp24 = tmp22 * tmp4 tmp25 = tmp22 * tmp22 tmp26 = tmp7 - tmp25 tmp27 = libdevice.sqrt(tmp26) tmp28 = tmp27 * tmp10 tmp29 = tmp24 - tmp28 tmp30 = tmp22 - tmp13 tmp31 = tl.where(tmp23, tmp29, tmp30) tmp32 = tmp21 * tmp31 tmp33 = tmp7 - tmp21 tmp34 = tmp33 * tmp22 tmp35 = tmp32 + tmp34 tmp36 = tmp35 * tmp7 tmp37 = triton_helpers.maximum(tmp20, tmp36) tmp40 = tmp39 > tmp2 tmp41 = tmp39 * tmp4 tmp42 = tmp39 * tmp39 tmp43 = tmp7 - tmp42 tmp44 = libdevice.sqrt(tmp43) tmp45 = tmp44 * tmp10 tmp46 = tmp41 - tmp45 tmp47 = tmp39 - tmp13 tmp48 = tl.where(tmp40, tmp46, tmp47) tmp49 = tmp38 * tmp48 tmp50 = tmp7 - tmp38 tmp51 = tmp50 * tmp39 tmp52 = tmp49 + tmp51 tmp53 = tmp52 * tmp7 tmp54 = triton_helpers.maximum(tmp37, tmp53) tmp57 = tmp56 > tmp2 tmp58 = tmp56 * tmp4 tmp59 = tmp56 * tmp56 tmp60 = tmp7 - tmp59 tmp61 = libdevice.sqrt(tmp60) tmp62 = tmp61 * tmp10 tmp63 = tmp58 - tmp62 tmp64 = tmp56 - tmp13 tmp65 = tl.where(tmp57, tmp63, tmp64) tmp66 = tmp55 * tmp65 tmp67 = tmp7 - tmp55 tmp68 = tmp67 * tmp56 tmp69 = tmp66 + tmp68 tmp70 = tmp69 * tmp7 tmp71 = triton_helpers.maximum(tmp54, tmp70) tmp72 = tmp20 - tmp71 tmp73 = 30.0 tmp74 = tmp72 * tmp73 tmp75 = tl_math.exp(tmp74) tmp76 = tmp36 - tmp71 tmp77 = tmp76 * tmp73 tmp78 = tl_math.exp(tmp77) tmp79 = tmp75 + tmp78 tmp80 = tmp53 - tmp71 tmp81 = tmp80 * tmp73 tmp82 = tl_math.exp(tmp81) tmp83 = tmp79 + tmp82 tmp84 = tmp70 - tmp71 tmp85 = tmp84 * tmp73 tmp86 = tl_math.exp(tmp85) tmp87 = tmp83 + tmp86 tl.store(out_ptr0 + x0, tmp71, xmask) tl.store(out_ptr1 + x0, tmp87, xmask) @triton.jit def triton_poi_fused__log_softmax_add_gt_mul_neg_pow_rsub_sqrt_sub_where_1( in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp21 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = -0.8775825618903726 tmp3 = tmp1 > tmp2 tmp4 = 0.8775825618903728 tmp5 = tmp1 * tmp4 tmp6 = tmp1 * tmp1 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp9 = libdevice.sqrt(tmp8) tmp10 = 0.479425538604203 tmp11 = tmp9 * tmp10 tmp12 = tmp5 - tmp11 tmp13 = 0.23971276930210156 tmp14 = tmp1 - tmp13 tmp15 = tl.where(tmp3, tmp12, tmp14) tmp16 = tmp0 * tmp15 tmp17 = tmp7 - tmp0 tmp18 = tmp17 * tmp1 tmp19 = tmp16 + tmp18 tmp20 = tmp19 * tmp7 tmp22 = tmp20 - tmp21 tmp23 = 30.0 tmp24 = tmp22 * tmp23 tmp26 = tl_math.log(tmp25) tmp27 = tmp24 - tmp26 tmp28 = -tmp27 tmp29 = tmp28 * tmp0 tl.store(out_ptr0 + x2, tmp29, xmask) @triton.jit def triton_per_fused_div_mean_sum_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 64.0 tmp11 = tmp9 / tmp10 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_add_gt_mul_pow_rsub_sqrt_sub_where_0[grid (64)](arg1_1, arg0_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_add_gt_mul_neg_pow_rsub_sqrt_sub_where_1[ grid(256)](arg1_1, arg0_1, buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del buf0 del buf1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_div_mean_sum_2[grid(1)](buf4, buf2, 1, 64, XBLOCK= 1, num_warps=2, num_stages=1) del buf2 return buf4, class DenseCrossEntropy(nn.Module): def forward(self, x, target): x = x.float() target = target.float() logprobs = torch.nn.functional.log_softmax(x, dim=-1) loss = -logprobs * target loss = loss.sum(-1) return loss.mean() class ArcFaceLossNew(nn.modules.Module): def __init__(self, s=30.0, m=0.5): super().__init__() self.crit = DenseCrossEntropy() self.s = s self.cos_m = math.cos(m) self.sin_m = math.sin(m) self.th = math.cos(math.pi - m) self.mm = math.sin(math.pi - m) * m def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
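A hedged parity check for this record as well (CUDA only; assumes the eager ArcFaceLoss and the compiled ArcFaceLossNew are importable together):

import torch

if torch.cuda.is_available():
    logits = torch.rand(4, 4, 4, 4, device='cuda')
    labels = torch.rand(4, 4, 4, 4, device='cuda')
    # The fused version folds margin, scaling, and log-softmax into two
    # kernels; the scalar loss should match the eager composition.
    assert torch.allclose(ArcFaceLoss()(logits, labels),
                          ArcFaceLossNew()(logits, labels), atol=1e-5)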
aaron276h/kaggle-rcic-1st
ArcFaceLoss
false
12040
[ "MIT" ]
0
d35e97847df3c29f548e60bc936d3fec7a0a4c08
https://github.com/aaron276h/kaggle-rcic-1st/tree/d35e97847df3c29f548e60bc936d3fec7a0a4c08
SplAtConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/cq/ccqcunoz44ytyqzy34r7cs2t74rk42s65mtajwcrygug6wcgzq24.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ve/cvelarc6diodfr33zpvmvgzuuzkvprbz6m6y4ohg2zlecikuyoyi.py # Topologically Sorted Source Nodes: [add, gap, gap_1], Original ATen: [aten.add, aten.mean] # Source node to ATen node mapping: # add => add # gap => add_1 # gap_1 => mean # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %getitem_5), kwargs = {}) # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1, -2], True), kwargs = {}) triton_poi_fused_add_mean_1 = async_compile.triton('triton_poi_fused_add_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 1.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/t4/ct4hhjqt2vge2xiycaomw3jiwzw326vnuf5jpebeysc4mpxrpciw.py # Topologically Sorted Source Nodes: [gap_2, gap_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # gap_2 => convolution_1 # gap_3 => relu_1 # Graph fragment: # 
%convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ym/cymnwinsgdg6rzof735q2mbji4z4uuuzffbkib2mmjzjggqvt5ti.py # Topologically Sorted Source Nodes: [atten], Original ATen: [aten.convolution] # Source node to ATen node mapping: # atten => convolution_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/al/calq4luacvjpbhaq6oadffm56qzlddnaiqtfsg2rnpgdoin4doyd.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_3 => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%permute, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 
32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 8) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (8*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (8*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + (x3), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/np/cnpcapaif5ggvdkeg53tnm4dojax33drj6dlnhtwxrttlaoiy23c.py # Topologically Sorted Source Nodes: [mul, mul_1, add_2, out], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add_2 => add_2 # mul => mul # mul_1 => mul_1 # out => add_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_6, %getitem_2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_7, %getitem_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_1), kwargs = {}) triton_poi_fused_add_mul_5 = async_compile.triton('triton_poi_fused_add_mul_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (8*x1)), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp6 = tl.load(in_ptr1 + (4 + x0 + (8*x1)), xmask) tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 * tmp6 tmp8 = tmp4 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (8, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (8, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_7, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=2, bias=None) assert_size_stride(buf0, (4, 8, 1, 1), (8, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 8, 1, 1), (8, 1, 32, 32), 0); del buf0 # reuse buf9 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf9, 32, grid=grid(32), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [add, gap, gap_1], Original ATen: [aten.add, aten.mean] triton_poi_fused_add_mean_1.run(buf1, buf2, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [gap_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 1, 1), (32, 1, 1, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [gap_2, gap_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf4, primals_5, 128, grid=grid(128), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [atten], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 8, 1, 1), (8, 1, 1, 1)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [atten], Original ATen: [aten.convolution] triton_poi_fused_convolution_3.run(buf6, primals_7, 32, grid=grid(32), stream=stream0) del primals_7 buf7 = empty_strided_cuda((4, 2, 1, 4), (8, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf6, buf7, 32, grid=grid(32), stream=stream0) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, add_2, out], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_5.run(buf7, buf1, buf8, 16, grid=grid(16), stream=stream0) return (buf8, primals_1, primals_3, primals_4, primals_6, reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf2, buf4, buf6, reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
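# Note on triton_poi_fused__softmax_4 above: it is the standard
# numerically stable softmax (subtract the row max before exponentiating)
# taken over the radix dimension, pairing channel c with channel c + 4 in
# each group of 8. A minimal eager sketch of the same math (illustrative
# only, not part of the generated module; assumes radix=2 as traced):
import torch
atten = torch.rand(4, 8, 1, 1)                      # shape of buf6
pair = atten.view(4, 2, 4, 1, 1)                    # (batch, radix, channel, 1, 1)
shifted = pair - pair.max(dim=1, keepdim=True).values
probs = shifted.exp() / shifted.exp().sum(dim=1, keepdim=True)
assert torch.allclose(probs, torch.softmax(pair, dim=1))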
from torch.nn import Module import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import ReLU from torch.nn.modules.utils import _pair class DropBlock2D(object): def __init__(self, *args, **kwargs): raise NotImplementedError class rSoftMax(nn.Module): def __init__(self, radix, cardinality): super().__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplAtConv2d(Module): """Split-Attention Conv2d """ def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2, reduction_factor=4, rectify=False, rectify_avg=False, norm_layer= None, dropblock_prob=0.0, **kwargs): super(SplAtConv2d, self).__init__() padding = _pair(padding) self.rectify = rectify and (padding[0] > 0 or padding[1] > 0) self.rectify_avg = rectify_avg inter_channels = max(in_channels * radix // reduction_factor, 32) self.radix = radix self.cardinality = groups self.channels = channels self.dropblock_prob = dropblock_prob if self.rectify: self.conv = RFConv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs) else: self.conv = Conv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.use_bn = norm_layer is not None if self.use_bn: self.bn0 = norm_layer(channels * radix) self.relu = ReLU(inplace=True) self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality) if self.use_bn: self.bn1 = norm_layer(inter_channels) self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self. cardinality) if dropblock_prob > 0.0: self.dropblock = DropBlock2D(dropblock_prob, 3) self.rsoftmax = rSoftMax(radix, groups) def forward(self, x): x = self.conv(x) if self.use_bn: x = self.bn0(x) if self.dropblock_prob > 0.0: x = self.dropblock(x) x = self.relu(x) batch, rchannel = x.shape[:2] if self.radix > 1: if torch.__version__ < '1.5': splited = torch.split(x, int(rchannel // self.radix), dim=1) else: splited = torch.split(x, rchannel // self.radix, dim=1) gap = sum(splited) else: gap = x gap = F.adaptive_avg_pool2d(gap, 1) gap = self.fc1(gap) if self.use_bn: gap = self.bn1(gap) gap = self.relu(gap) atten = self.fc2(gap) atten = self.rsoftmax(atten).view(batch, -1, 1, 1) if self.radix > 1: if torch.__version__ < '1.5': attens = torch.split(atten, int(rchannel // self.radix), dim=1) else: attens = torch.split(atten, rchannel // self.radix, dim=1) out = sum([(att * split) for att, split in zip(attens, splited)]) else: out = atten * x return out.contiguous() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'channels': 4, 'kernel_size': 4}]
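# Hypothetical smoke test wiring the module above to its own
# get_init_inputs()/get_inputs() helpers (variable names below are
# illustrative, not from the original file). With a 4x4 kernel on a 4x4
# input, the grouped conv collapses the spatial dims, so the
# split-attention output is (batch, channels, 1, 1):
import torch
init_args, init_kwargs = get_init_inputs()
module = SplAtConv2d(*init_args, **init_kwargs)
out = module(*get_inputs())
print(out.shape)  # torch.Size([4, 4, 1, 1])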
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import torch.nn as nn import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import ReLU from torch.nn.modules.utils import _pair assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 1.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 8 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (4 + x0 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_add_mul_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 8 * x1), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp6 = tl.load(in_ptr1 + (4 + x0 + 8 * x1), xmask) tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 * tmp6 tmp8 = tmp4 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (8, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (8, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_7, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=2, bias=None) assert_size_stride(buf0, (4, 8, 1, 1), (8, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 8, 1, 1), (8, 1, 32, 32), 0) del buf0 buf9 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(32)](buf1, primals_2, buf9, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_add_mean_1[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 1, 1), (32, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_2[grid(128)](buf4, primals_5, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 8, 1, 1), (8, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_3[grid(32)](buf6, primals_7, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_7 buf7 = empty_strided_cuda((4, 2, 1, 4), (8, 4, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(32)](buf6, buf7, 32, XBLOCK=32, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_add_mul_5[grid(16)](buf7, buf1, buf8, 16, XBLOCK= 16, num_warps=1, num_stages=1) return (buf8, primals_1, primals_3, primals_4, primals_6, reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf2, buf4, buf6, reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf9) class DropBlock2D(object): def __init__(self, *args, **kwargs): raise NotImplementedError class rSoftMax(nn.Module): def __init__(self, radix, cardinality): super().__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplAtConv2dNew(Module): """Split-Attention Conv2d """ def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), 
padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2, reduction_factor=4, rectify=False, rectify_avg=False, norm_layer= None, dropblock_prob=0.0, **kwargs): super(SplAtConv2dNew, self).__init__() padding = _pair(padding) self.rectify = rectify and (padding[0] > 0 or padding[1] > 0) self.rectify_avg = rectify_avg inter_channels = max(in_channels * radix // reduction_factor, 32) self.radix = radix self.cardinality = groups self.channels = channels self.dropblock_prob = dropblock_prob if self.rectify: self.conv = RFConv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs) else: self.conv = Conv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.use_bn = norm_layer is not None if self.use_bn: self.bn0 = norm_layer(channels * radix) self.relu = ReLU(inplace=True) self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality) if self.use_bn: self.bn1 = norm_layer(inter_channels) self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self. cardinality) if dropblock_prob > 0.0: self.dropblock = DropBlock2D(dropblock_prob, 3) self.rsoftmax = rSoftMax(radix, groups) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
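# Hedged parity sketch between the eager SplAtConv2d and the
# Inductor-compiled SplAtConv2dNew (illustrative; assumes a CUDA device,
# since call() pins device 0, and that both classes are importable side
# by side). Parameter names line up, so state_dict transfer is
# one-to-one:
import torch
eager = SplAtConv2d(4, 4, 4).cuda()
fused = SplAtConv2dNew(4, 4, 4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)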
XuYongi/KiNet
SplAtConv2d
false
12041
[ "MIT" ]
0
fab8865a09e3779baf0daf1db1bf59a9cfbde450
https://github.com/XuYongi/KiNet/tree/fab8865a09e3779baf0daf1db1bf59a9cfbde450
Simplenet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/zv/czvfpj3ah2lefbwpcuw4esv23bxs5a3ab63ply3ntgbsdktepd5v.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 784) % 6 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = (xindex // 14) x2 = (xindex // 1176) x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, xmask) 
tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xe/cxelxvpw3asckozc53rh36773aohp5hqpbp2nos5ymcdqhxvo4bl.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jn/cjnqv3sgcv5x2iz7ij5zdad6ofabcnonrlksgsxu2ob7n274gz6b.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 
1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120, ), (1, )) assert_size_stride(primals_8, (100, 120), (120, 1)) assert_size_stride(primals_9, (100, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 18816, grid=grid(18816), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, 
primals_5, 6400, grid=grid(6400), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 480, grid=grid(480), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (120, 100), (1, 120), 0), alpha=1, beta=1, out=buf10) del primals_9 return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((120, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((100, 120), (120, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
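# Note on the max_pool2d_with_indices kernels above: instead of the flat
# input offsets that eager return_indices=True would produce, the
# low-memory lowering stores which of the four 2x2 taps won as an int8
# code in {0, 1, 2, 3} (taps loaded in row-major window order), which is
# enough to route gradients in the backward pass. Illustrative sketch for
# a single window with assumed example values:
import torch
window = torch.tensor([[0.1, 0.9],
                       [0.4, 0.2]])
offset = int(window.reshape(-1).argmax())  # -> 1, the top-right tap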
import torch from torch.optim.lr_scheduler import * import torch.nn.functional as F import torch.optim import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.onnx import torch.testing class Simplenet(nn.Module): def __init__(self): super(Simplenet, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 100) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
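# Hypothetical shape walk-through for the eager module above (not part of
# the original file): 32x32 -> 28x28 (conv1, 5x5, no padding) -> 14x14
# (pool) -> 10x10 (conv2) -> 5x5 (pool), so view(-1, 16 * 5 * 5) matches
# fc1's 400-wide input:
import torch
net = Simplenet()
logits = net(torch.rand(4, 3, 32, 32))
print(logits.shape)  # torch.Size([4, 100])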
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.optim.lr_scheduler import * import torch.optim import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.onnx import torch.testing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 6 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 1176 x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = 
tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (100, 120), (120, 1)) assert_size_stride(primals_9, (100,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2, 18816, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2, buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (120, 100), (1, 120), 0), alpha=1, beta=1, out=buf10) del primals_9 return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, primals_8, primals_6) class SimplenetNew(nn.Module): 
def __init__(self): super(SimplenetNew, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 100) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
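# Hedged parity sketch for the compiled path (illustrative; assumes a
# CUDA device, since call() hard-codes device 0, and that Simplenet and
# SimplenetNew are importable side by side):
import torch
eager = Simplenet().cuda()
fused = SimplenetNew().cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 3, 32, 32, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-4)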
aam12/distiller
Simplenet
false
12042
[ "Apache-2.0" ]
0
fd06fcba028d023e430cd37d1531bc2ac5202ea6
https://github.com/aam12/distiller/tree/fd06fcba028d023e430cd37d1531bc2ac5202ea6
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/4d/c4d7os35bf4bckecmik4nlyqqsirmteh4sh3yxnab5lmuntnmwk2.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 128 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (4*x2) + (36*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/j5/cj5nf2owtsdm2zwcezqxpyn63iwddjyadpotkhm2ua52inoqxdcl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/wv/cwvtp6qflpb42kxrujmda5zselv7wvkz3fgp2tryo2ftsisaildr.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = (yindex // 32) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/tl/ctlxctn7eg6nwvpdhdhyqadp63cm2ogdwxsotfynexn2zw62nfbb.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = 
call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ku/ckuw5gg26ddjp4n4da74yttcx6jxcy2y4vb2npxdoq42pzni2oot.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/m3/cm3haovccm7lav2s6wgp3wthu7in42r335z2o7yva4d7olh5begj.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ir/cirx6nbkabstacj3yb3umtzb7ustxzn5ha5etdpsewqc2v53x42u.py # Topologically Sorted Source Nodes: [conv2d_3, x_act], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_act => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + (4*x2) + (64*y1)), tmp6, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mx/cmxxnyyr5kmtzdpzon3a5fqr6k4jayrantx3iscrc5pevtb6lc52.py # Topologically Sorted Source Nodes: [x_act_2], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # x_act_2 => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_per_fused__log_softmax_8 = async_compile.triton('triton_per_fused__log_softmax_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_8(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + (16*x0)), tmp12, xmask) ''', device_str='cuda') # kernel 
path: runs/run_shard_9/inductor_cache/er/cerrhd6dfklfkghffr4w4v6k4tkknpp6pjf2fpylkzd3qma7oygl.py # Topologically Sorted Source Nodes: [conv2d_4, x_val], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_val => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = (yindex // 2) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (2*x2) + (32*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + (2*x2) + (32*y1)), tmp6, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/dy/cdyy4l65r6roouxfxf2rt7jc3yi26kd72lw6ykhtgaiqpacjtrts.py # Topologically Sorted Source Nodes: [x_val_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_val_2 => relu_5 # Graph fragment: # %add_tensor_1 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_15), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_10 = async_compile.triton('triton_poi_fused_relu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jp/cjpw3qv3xbeeitqrvxn6apmx6vcxqlrxpbbtjnzsyqgtt4tatr6q.py # Topologically Sorted Source Nodes: [x_val_3], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_val_3 => tanh # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_17), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_tanh_11 = async_compile.triton('triton_poi_fused_tanh_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_tanh_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (4, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (16, 64), (64, 1)) assert_size_stride(primals_11, (16, ), (1, )) assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_13, (2, ), (1, )) assert_size_stride(primals_14, (64, 32), (32, 1)) assert_size_stride(primals_15, (64, ), (1, )) assert_size_stride(primals_16, (1, 64), (64, 1)) assert_size_stride(primals_17, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 4, 3, 3), (36, 1, 12, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 128, 9, grid=grid(128, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 16, 16, grid=grid(16, 16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 2048, 9, grid=grid(2048, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 4, 4), (512, 1, 128, 32)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d, x], 
Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf5, primals_2, 2048, grid=grid(2048), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 4, 4), (1024, 1, 256, 64)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf7, primals_5, 4096, grid=grid(4096), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 4, 4), (2048, 1, 512, 128)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf9, primals_7, 8192, grid=grid(8192), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4)) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool) # Topologically Sorted Source Nodes: [conv2d_3, x_act], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_7.run(buf10, primals_9, buf11, buf23, 16, 16, grid=grid(16, 16), stream=stream0) del primals_9 buf12 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (4, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 16), (1, 64), 0), alpha=1, beta=1, out=buf12) del primals_11 buf15 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_act_2], Original ATen: [aten._log_softmax] triton_per_fused__log_softmax_8.run(buf12, buf15, 4, 16, grid=grid(4), stream=stream0) del buf12 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 2, 4, 4), (32, 1, 8, 2)) buf17 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) buf22 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool) # Topologically Sorted Source Nodes: [conv2d_4, x_val], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_9.run(buf16, primals_13, buf17, buf22, 8, 16, grid=grid(8, 16), stream=stream0) del buf16 del primals_13 buf18 = reinterpret_tensor(buf10, (4, 64), (64, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf17, (4, 32), (32, 1), 0), reinterpret_tensor(primals_14, (32, 64), (1, 32), 0), out=buf18) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source 
Nodes: [x_val_2], Original ATen: [aten.relu] triton_poi_fused_relu_10.run(buf19, primals_15, 256, grid=grid(256), stream=stream0) del primals_15 buf20 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf19, reinterpret_tensor(primals_16, (64, 1), (1, 64), 0), out=buf20) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [x_val_3], Original ATen: [aten.tanh] triton_poi_fused_tanh_11.run(buf21, primals_17, 4, grid=grid(4), stream=stream0) del primals_17 return (buf15, buf21, buf0, buf1, buf2, buf3, primals_8, primals_12, buf5, buf7, buf9, reinterpret_tensor(buf11, (4, 64), (64, 1), 0), buf15, reinterpret_tensor(buf17, (4, 32), (32, 1), 0), buf19, buf21, primals_16, primals_14, buf22, primals_10, buf23, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((16, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((2, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((64, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
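For reference, the numerically stable log-softmax computed by triton_per_fused__log_softmax_8 above reduces to x - max(x) - log(sum(exp(x - max(x)))) along dim 1. A minimal PyTorch sketch of the same computation (the (4, 16) shape comes from the compiled graph; log_softmax_reference is an illustrative name, not part of the generated module):

import torch

def log_softmax_reference(x: torch.Tensor) -> torch.Tensor:
    # Subtract the per-row max before exponentiating (tmp4/tmp5 in the
    # kernel) so exp() cannot overflow, then subtract the log-sum-exp.
    m = x.amax(dim=1, keepdim=True)
    shifted = x - m
    lse = shifted.exp().sum(dim=1, keepdim=True).log()  # tmp10/tmp11
    return shifted - lse

x = torch.randn(4, 16)
assert torch.allclose(log_softmax_reference(x), torch.log_softmax(x, dim=1), atol=1e-6)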
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    """policy-value network module"""

    def __init__(self, board_width, board_height):
        super(Net, self).__init__()
        self.board_width = board_width
        self.board_height = board_height
        # shared convolutional trunk
        self.conv1 = nn.Conv2d(4, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        # policy (action) head
        self.act_conv1 = nn.Conv2d(128, 4, kernel_size=1)
        self.act_fc1 = nn.Linear(4 * board_width * board_height, board_width * board_height)
        # value head
        self.val_conv1 = nn.Conv2d(128, 2, kernel_size=1)
        self.val_fc1 = nn.Linear(2 * board_width * board_height, 64)
        self.val_fc2 = nn.Linear(64, 1)

    def forward(self, state_input):
        x = F.relu(self.conv1(state_input))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        # policy head: per-move log-probabilities
        x_act = F.relu(self.act_conv1(x))
        x_act = x_act.view(-1, 4 * self.board_width * self.board_height)
        x_act = F.log_softmax(self.act_fc1(x_act), dim=1)
        # value head: scalar position evaluation in [-1, 1]
        x_val = F.relu(self.val_conv1(x))
        x_val = x_val.view(-1, 2 * self.board_width * self.board_height)
        x_val = F.relu(self.val_fc1(x_val))
        x_val = torch.tanh(self.val_fc2(x_val))
        return x_act, x_val


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'board_width': 4, 'board_height': 4}]
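A short usage sketch for the eager Net above (illustrative, not part of the original repository; shapes follow get_inputs/get_init_inputs): each 4-plane 4x4 board state yields 16 per-move log-probabilities from the policy head and one tanh-squashed scalar from the value head.

import torch

net = Net(board_width=4, board_height=4)
state = torch.rand(4, 4, 4, 4)    # batch of 4 boards, 4 input planes each
log_probs, value = net(state)
print(log_probs.shape)            # torch.Size([4, 16]) -- one entry per board cell
print(value.shape)                # torch.Size([4, 1])  -- scalar evaluation per board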
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp6, xmask & ymask) @triton.jit def triton_per_fused__log_softmax_8(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 16 * x0), tmp12, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 8 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = yindex // 2 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 2 * x2 + 32 * y1), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_tanh_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), 
(36, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (4, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (16, 64), (64, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_13, (2,), (1,)) assert_size_stride(primals_14, (64, 32), (32, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (1, 64), (64, 1)) assert_size_stride(primals_17, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 4, 3, 3), (36, 1, 12, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(128, 9)](primals_1, buf0, 128, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch. float32) triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 4, 4), (512, 1, 128, 32)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_4[grid(2048)](buf5, primals_2, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 4, 4), (1024, 1, 256, 64)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_5[grid(4096)](buf7, primals_5, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 4, 4), (2048, 1, 512, 128)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_6[grid(8192)](buf9, primals_7, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4)) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_7[grid(16, 16)]( buf10, primals_9, buf11, buf23, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (4, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 16), (1, 64), 0), alpha=1, beta=1, out=buf12) del primals_11 buf15 = 
empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_per_fused__log_softmax_8[grid(4)](buf12, buf15, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf12 buf16 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 2, 4, 4), (32, 1, 8, 2)) buf17 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) buf22 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_9[grid(8, 16)]( buf16, primals_13, buf17, buf22, 8, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del buf16 del primals_13 buf18 = reinterpret_tensor(buf10, (4, 64), (64, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf17, (4, 32), (32, 1), 0), reinterpret_tensor(primals_14, (32, 64), (1, 32), 0), out=buf18) buf19 = buf18 del buf18 triton_poi_fused_relu_10[grid(256)](buf19, primals_15, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_15 buf20 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf19, reinterpret_tensor(primals_16, (64, 1), (1, 64), 0), out=buf20) buf21 = buf20 del buf20 triton_poi_fused_tanh_11[grid(4)](buf21, primals_17, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_17 return (buf15, buf21, buf0, buf1, buf2, buf3, primals_8, primals_12, buf5, buf7, buf9, reinterpret_tensor(buf11, (4, 64), (64, 1), 0), buf15, reinterpret_tensor(buf17, (4, 32), (32, 1), 0), buf19, buf21, primals_16, primals_14, buf22, primals_10, buf23) class NetNew(nn.Module): """policy-value network module""" def __init__(self, board_width, board_height): super(NetNew, self).__init__() self.board_width = board_width self.board_height = board_height self.conv1 = nn.Conv2d(4, 32, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1) self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.act_conv1 = nn.Conv2d(128, 4, kernel_size=1) self.act_fc1 = nn.Linear(4 * board_width * board_height, board_width * board_height) self.val_conv1 = nn.Conv2d(128, 2, kernel_size=1) self.val_fc1 = nn.Linear(2 * board_width * board_height, 64) self.val_fc2 = nn.Linear(64, 1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.act_conv1.weight primals_9 = self.act_conv1.bias primals_10 = self.act_fc1.weight primals_11 = self.act_fc1.bias primals_12 = self.val_conv1.weight primals_13 = self.val_conv1.bias primals_14 = self.val_fc1.weight primals_15 = self.val_fc1.bias primals_16 = self.val_fc2.weight primals_17 = self.val_fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1]
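A hedged sanity-check sketch (assumes a CUDA device and that Net and NetNew from this entry are both in scope; the tolerances are illustrative): since NetNew keeps Net's parameter names and layout, loading one state_dict into both should make the Inductor-generated call path agree with the eager forward.

import torch

torch.manual_seed(0)
eager = Net(4, 4).cuda()
compiled = NetNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical weights for both paths
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    act_ref, val_ref = eager(x)
    act_new, val_new = compiled(x)
torch.testing.assert_close(act_new, act_ref, rtol=1e-4, atol=1e-5)
torch.testing.assert_close(val_new, val_ref, rtol=1e-4, atol=1e-5)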
ZiwenZhuang/AlphaZero_Gomoku
Net
false
12043
[ "MIT" ]
0
72db1c3eda1f6133da24c924da6032ea3569076e
https://github.com/ZiwenZhuang/AlphaZero_Gomoku/tree/72db1c3eda1f6133da24c924da6032ea3569076e
ResBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ia/ciaqxcwk4kpij3cqfsh3kugwj4vb2k4au4dshwoxpl4qlvjwwxrz.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # out_1 => convert_element_type_1 # Graph fragment: # %convert_element_type_1 : [num_users=7] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) 
tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/aw/cawr6lnrfyvw4oaprxasgpeu3nqbaqfype73ukp7ovs3mthdbvhv.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.clamp] # Source node to ATen node mapping: # out_1 => add_1, clamp_max # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {}) # %clamp_max : [num_users=5] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 3), kwargs = {}) triton_poi_fused_add_clamp_1 = async_compile.triton('triton_poi_fused_add_clamp_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ex/cexxcsfzym3tnbdsltmqd73tdjfjaza7zxlxaz7iyzvcp4xkk2za.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] # Source node to ATen node mapping: # out_1 => add, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul_1, sub, sub_2 # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (2,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {}) # %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=5] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + (x0), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/nv/cnvfvayghdoicvmmoqduxx5rpguwh4mnzhcq5u3wekthrifcqav3.py # Topologically Sorted Source Nodes: [conv2d, out, out_1, x], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # conv2d => convolution # out => gt, mul, where # out_1 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_4, add_5, add_6, mul_3, mul_4, mul_5, sub_3, sub_4, sub_6 # x => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_11, add_12, add_13, mul_10, mul_11, mul_9, sub_10, sub_11, sub_13 # Graph fragment: # %convolution : [num_users=3] = 
call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_3), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_4), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {}) # %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_5), kwargs = {}) # %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_2), kwargs = {}) # %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_9), kwargs = {}) # %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {}) # %mul_10 : 
[num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_2), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_10), kwargs = {}) # %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_12, %add_11), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %clamp_max_3), kwargs = {}) # %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mul_11), kwargs = {}) triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*i64', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*i64', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 2) % 2 x0 = xindex % 2 x5 = (xindex // 4) x2 = (xindex // 4) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last') tmp48 = tl.load(in_ptr7 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = 
tmp11 > tmp12 tmp14 = 0.2 tmp15 = tmp11 * tmp14 tmp16 = tl.where(tmp13, tmp11, tmp15) tmp18 = tmp17 + tmp1 tmp19 = tmp17 < 0 tmp20 = tl.where(tmp19, tmp18, tmp17) tmp21 = tl.load(in_ptr2 + (tmp20 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last') tmp22 = tmp21 + tmp10 tmp23 = tmp22 > tmp12 tmp24 = tmp22 * tmp14 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tmp25 - tmp16 tmp28 = tmp26 * tmp27 tmp29 = tmp16 + tmp28 tmp31 = tmp30 + tmp1 tmp32 = tmp30 < 0 tmp33 = tl.where(tmp32, tmp31, tmp30) tmp34 = tl.load(in_ptr2 + (tmp8 + (4*tmp33) + (16*x5)), xmask, eviction_policy='evict_last') tmp35 = tmp34 + tmp10 tmp36 = tmp35 > tmp12 tmp37 = tmp35 * tmp14 tmp38 = tl.where(tmp36, tmp35, tmp37) tmp39 = tl.load(in_ptr2 + (tmp20 + (4*tmp33) + (16*x5)), xmask, eviction_policy='evict_last') tmp40 = tmp39 + tmp10 tmp41 = tmp40 > tmp12 tmp42 = tmp40 * tmp14 tmp43 = tl.where(tmp41, tmp40, tmp42) tmp44 = tmp43 - tmp38 tmp45 = tmp44 * tmp27 tmp46 = tmp38 + tmp45 tmp47 = tmp46 - tmp29 tmp49 = tmp47 * tmp48 tmp50 = tmp29 + tmp49 tmp51 = tl.load(in_ptr8 + (tmp8 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last') tmp52 = tl.load(in_ptr8 + (tmp20 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last') tmp53 = tmp52 - tmp51 tmp54 = tmp53 * tmp27 tmp55 = tmp51 + tmp54 tmp56 = tl.load(in_ptr8 + (tmp8 + (4*tmp33) + (16*x5)), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr8 + (tmp20 + (4*tmp33) + (16*x5)), xmask, eviction_policy='evict_last') tmp58 = tmp57 - tmp56 tmp59 = tmp58 * tmp27 tmp60 = tmp56 + tmp59 tmp61 = tmp60 - tmp55 tmp62 = tmp61 * tmp48 tmp63 = tmp55 + tmp62 tl.store(in_out_ptr0 + (x4), tmp50, xmask) tl.store(in_out_ptr1 + (x4), tmp63, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5q/c5qrop55suvpqojfx24xrvjlodhijl7tsdqplzshgs2ylmmfkwz4.py # Topologically Sorted Source Nodes: [conv2d_1, out_2, out_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # out_2 => gt_1, mul_6, where_1 # out_3 => add_14 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_6, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_6), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %convolution_2), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {}) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_out_ptr0 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp9 = tmp7 + tmp8 tmp10 = tmp7 > tmp3 tl.store(in_out_ptr0 + (x3), tmp9, xmask) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/s5/cs5o65bd5qls62jyofgsrvlsxw6ku55z7wudf4ziucds3lbj4c7j.py # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # conv2d => convolution # out => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((2, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._to_copy] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_0.run(buf1, 2, grid=grid(2), stream=stream0) buf2 = empty_strided_cuda((2, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_1.run(buf2, 2, grid=grid(2), stream=stream0) buf3 = empty_strided_cuda((2, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_0.run(buf3, 2, grid=grid(2), stream=stream0) buf4 = empty_strided_cuda((2, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_1.run(buf4, 2, grid=grid(2), stream=stream0) buf5 = empty_strided_cuda((2, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2.run(buf5, 2, grid=grid(2), stream=stream0) buf7 = empty_strided_cuda((2, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2.run(buf7, 2, grid=grid(2), stream=stream0) buf8 = empty_strided_cuda((4, 4, 
2, 2), (16, 4, 2, 1), torch.float32) buf9 = buf8; del buf8 # reuse buf11 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [conv2d, out, out_1, x], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3.run(buf9, buf12, buf1, buf3, buf0, primals_2, buf4, buf5, buf2, buf7, primals_3, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 2, 2), (16, 4, 2, 1)) # Topologically Sorted Source Nodes: [skip], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(buf12, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 4, 2, 2), (16, 4, 2, 1)) buf14 = buf13; del buf13 # reuse buf15 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_1, out_2, out_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward] triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4.run(buf14, buf10, primals_5, buf15, 64, grid=grid(64), stream=stream0) del buf10 del primals_5 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf0, primals_2, buf16, 256, grid=grid(256), stream=stream0) del buf0 del primals_2 return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf12, buf15, buf16, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn import functional as F


class ResBlock(nn.Module):
    """Residual block with bilinear upsampling/downsampling.

    Args:
        in_channels (int): Channel number of the input.
        out_channels (int): Channel number of the output.
        mode (str): Upsampling/downsampling mode. Options: down | up.
            Default: down.
    """

    def __init__(self, in_channels, out_channels, mode='down'):
        super(ResBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1)
        self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1)
        self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False)
        if mode == 'down':
            self.scale_factor = 0.5
        elif mode == 'up':
            self.scale_factor = 2

    def forward(self, x):
        out = F.leaky_relu_(self.conv1(x), negative_slope=0.2)
        out = F.interpolate(out, scale_factor=self.scale_factor,
                            mode='bilinear', align_corners=False)
        out = F.leaky_relu_(self.conv2(out), negative_slope=0.2)
        x = F.interpolate(x, scale_factor=self.scale_factor,
                          mode='bilinear', align_corners=False)
        skip = self.skip(x)
        out = out + skip
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
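A minimal usage sketch (my addition, not from the source repo) to make the shape contract concrete: with mode='down' the bilinear resize halves H and W before conv2 and the skip path; with mode='up' it doubles them. It assumes the ResBlock class above is in scope.

import torch

# Hypothetical shape check for the ResBlock above.
block_down = ResBlock(in_channels=4, out_channels=4, mode='down')
block_up = ResBlock(in_channels=4, out_channels=8, mode='up')
x = torch.rand(4, 4, 4, 4)                  # (N, C, H, W)
assert block_down(x).shape == (4, 4, 2, 2)  # spatial dims halved
assert block_up(x).shape == (4, 8, 8, 8)    # spatial dims doubled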
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x5 = xindex // 4 x2 = xindex // 4 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp48 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x5), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = 0.2 tmp15 = tmp11 * tmp14 tmp16 = tl.where(tmp13, tmp11, tmp15) tmp18 = tmp17 + tmp1 tmp19 = tmp17 < 0 tmp20 = tl.where(tmp19, tmp18, tmp17) tmp21 = tl.load(in_ptr2 + (tmp20 + 4 * tmp4 + 16 * x5), xmask, eviction_policy='evict_last') tmp22 = tmp21 + 
tmp10 tmp23 = tmp22 > tmp12 tmp24 = tmp22 * tmp14 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tmp25 - tmp16 tmp28 = tmp26 * tmp27 tmp29 = tmp16 + tmp28 tmp31 = tmp30 + tmp1 tmp32 = tmp30 < 0 tmp33 = tl.where(tmp32, tmp31, tmp30) tmp34 = tl.load(in_ptr2 + (tmp8 + 4 * tmp33 + 16 * x5), xmask, eviction_policy='evict_last') tmp35 = tmp34 + tmp10 tmp36 = tmp35 > tmp12 tmp37 = tmp35 * tmp14 tmp38 = tl.where(tmp36, tmp35, tmp37) tmp39 = tl.load(in_ptr2 + (tmp20 + 4 * tmp33 + 16 * x5), xmask, eviction_policy='evict_last') tmp40 = tmp39 + tmp10 tmp41 = tmp40 > tmp12 tmp42 = tmp40 * tmp14 tmp43 = tl.where(tmp41, tmp40, tmp42) tmp44 = tmp43 - tmp38 tmp45 = tmp44 * tmp27 tmp46 = tmp38 + tmp45 tmp47 = tmp46 - tmp29 tmp49 = tmp47 * tmp48 tmp50 = tmp29 + tmp49 tmp51 = tl.load(in_ptr8 + (tmp8 + 4 * tmp4 + 16 * x5), xmask, eviction_policy='evict_last') tmp52 = tl.load(in_ptr8 + (tmp20 + 4 * tmp4 + 16 * x5), xmask, eviction_policy='evict_last') tmp53 = tmp52 - tmp51 tmp54 = tmp53 * tmp27 tmp55 = tmp51 + tmp54 tmp56 = tl.load(in_ptr8 + (tmp8 + 4 * tmp33 + 16 * x5), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr8 + (tmp20 + 4 * tmp33 + 16 * x5), xmask, eviction_policy='evict_last') tmp58 = tmp57 - tmp56 tmp59 = tmp58 * tmp27 tmp60 = tmp56 + tmp59 tmp61 = tmp60 - tmp55 tmp62 = tmp61 * tmp48 tmp63 = tmp55 + tmp62 tl.store(in_out_ptr0 + x4, tmp50, xmask) tl.store(in_out_ptr1 + x4, tmp63, xmask) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp9 = tmp7 + tmp8 tmp10 = tmp7 > tmp3 tl.store(in_out_ptr0 + x3, tmp9, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((2, 1), (1, 1), torch.int64) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(2)](buf1, 2, XBLOCK=2, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((2, 1), (1, 1), torch.int64) 
triton_poi_fused_add_clamp_1[grid(2)](buf2, 2, XBLOCK=2, num_warps= 1, num_stages=1) buf3 = empty_strided_cuda((2,), (1,), torch.int64) triton_poi_fused__to_copy_0[grid(2)](buf3, 2, XBLOCK=2, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((2,), (1,), torch.int64) triton_poi_fused_add_clamp_1[grid(2)](buf4, 2, XBLOCK=2, num_warps= 1, num_stages=1) buf5 = empty_strided_cuda((2,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2[grid(2)](buf5, 2, XBLOCK=2, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((2, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2[grid(2)](buf7, 2, XBLOCK=2, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf9 = buf8 del buf8 buf11 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf12 = buf11 del buf11 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3[ grid(64)](buf9, buf12, buf1, buf3, buf0, primals_2, buf4, buf5, buf2, buf7, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 2, 2), (16, 4, 2, 1)) buf13 = extern_kernels.convolution(buf12, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 4, 2, 2), (16, 4, 2, 1)) buf14 = buf13 del buf13 buf15 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4[grid (64)](buf14, buf10, primals_5, buf15, 64, XBLOCK=64, num_warps= 1, num_stages=1) del buf10 del primals_5 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256) ](buf0, primals_2, buf16, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf12, buf15, buf16) class ResBlockNew(nn.Module): """Residual block with bilinear upsampling/downsampling. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. mode (str): Upsampling/downsampling mode. Options: down | up. Default: down. """ def __init__(self, in_channels, out_channels, mode='down'): super(ResBlockNew, self).__init__() self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1) self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1) self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False) if mode == 'down': self.scale_factor = 0.5 elif mode == 'up': self.scale_factor = 2 def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.skip.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
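A hedged parity sketch (my addition, not part of the generated artifact): with identical weights, the compiled ResBlockNew should agree with the eager ResBlock up to floating-point tolerance. It assumes both classes above are importable into one scope, and it requires a CUDA device because call() allocates its buffers on cuda:0.

import torch

def check_resblock_parity():
    # Illustrative only; skip when no GPU is present.
    if not torch.cuda.is_available():
        return
    ref = ResBlock(4, 4, mode='down').cuda()
    new = ResBlockNew(4, 4, mode='down').cuda()
    new.load_state_dict(ref.state_dict())  # share identical parameters
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), new(x), atol=1e-5)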
abhishekm47/GFPGAN
ResBlock
false
12,044
[ "BSD-3-Clause" ]
0
39d063749433b38d98c75740b052934ae8bc80f6
https://github.com/abhishekm47/GFPGAN/tree/39d063749433b38d98c75740b052934ae8bc80f6
NormLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/tx/ctxgfdomqe65rl3biiiivn2iqu44cw3hqgbqow2ioi4jjo2s4zi6.py # Topologically Sorted Source Nodes: [linalg_norm, ny, linalg_norm_1, nY, diff, getitem, square, getitem_1, mul, mean], Original ATen: [aten.linalg_vector_norm, aten.div, aten.sub, aten.index, aten.pow, aten.mul, aten.mean] # Source node to ATen node mapping: # diff => sub # getitem => index # getitem_1 => index_1 # linalg_norm => pow_1, pow_2, sum_1 # linalg_norm_1 => pow_3, pow_4, sum_2 # mean => mean # mul => mul # nY => div_1 # ny => div # square => pow_5 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, 5.0), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2.0), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1]), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_4, 5.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%sub, [%arg2_1]), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%index, 2), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg3_1, [%arg2_1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_5, %index_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0 = 
async_compile.triton('triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = (rindex // 16) r0 = rindex % 16 r4 = rindex r2 = rindex % 4 tmp0 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr0 + (r2), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (r0 + (64*tmp4)), None) tmp7 = tmp6 * tmp6 tmp8 = tl.load(in_ptr1 + (16 + r0 + (64*tmp4)), None) tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = tl.load(in_ptr1 + (32 + r0 + (64*tmp4)), None) tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tl.load(in_ptr1 + (48 + r0 + (64*tmp4)), None) tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = 0.2 tmp19 = tmp17 * tmp18 tmp20 = tl.load(in_ptr2 + (r0 + (64*tmp4)), None) tmp21 = tmp20 * tmp20 tmp22 = tl.load(in_ptr2 + (16 + r0 + (64*tmp4)), None) tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tl.load(in_ptr2 + (32 + r0 + (64*tmp4)), None) tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tl.load(in_ptr2 + (48 + r0 + (64*tmp4)), None) tmp29 = tmp28 * tmp28 tmp30 = tmp27 + tmp29 tmp31 = libdevice.sqrt(tmp30) tmp32 = tmp31 * tmp18 tmp33 = tmp19 - tmp32 tmp34 = tmp33 * tmp33 tmp36 = tmp35 + tmp1 tmp37 = tmp35 < 0 tmp38 = tl.where(tmp37, tmp36, tmp35) tl.device_assert((0 <= tmp38) & (tmp38 < 4), "index out of bounds: 0 <= tmp38 < 4") tmp40 = tl.load(in_ptr3 + (tmp38), None, 
eviction_policy='evict_last') tmp41 = tmp40.to(tl.float32) tmp42 = tmp34 * tmp41 tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK]) tmp45 = tl.sum(tmp43, 1)[:, None] tmp46 = 64.0 tmp47 = tmp45 / tmp46 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp47, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, ), (1, )) assert_size_stride(arg3_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [linalg_norm, ny, linalg_norm_1, nY, diff, getitem, square, getitem_1, mul, mean], Original ATen: [aten.linalg_vector_norm, aten.div, aten.sub, aten.index, aten.pow, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0.run(buf2, arg2_1, arg0_1, arg1_1, arg3_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) arg3_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class NormLoss(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, y, Y, w, mask):
        ny = torch.linalg.norm(y, dim=1, keepdim=False) / 5.0
        nY = torch.linalg.norm(Y, dim=1, keepdim=False) / 5.0
        diff = ny - nY
        return torch.mean(torch.square(diff[mask]) * w[mask])


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
            torch.ones([4], dtype=torch.int64),
            torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {}]
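To make the fused reduction kernels in this row easier to audit, here is an elementary-ops restatement (my sketch, not from the repo): the loss is the mean of ((|y|_2 - |Y|_2) / 5)[mask]^2 * w[mask], with the L2 norm taken over dim=1 (the channel axis).

import torch

def norm_loss_by_hand(y, Y, w, mask):
    ny = y.pow(2).sum(dim=1).sqrt() / 5.0  # channel-wise L2 norm, scaled
    nY = Y.pow(2).sum(dim=1).sqrt() / 5.0
    diff = ny - nY                         # shape (N, H, W)
    # mask gathers along the batch axis; w[mask] broadcasts over the last axis,
    # matching the r1 / r2 index split in the fused kernel
    return (diff[mask] ** 2 * w[mask]).mean()

y, Y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
w = mask = torch.ones(4, dtype=torch.int64)
assert torch.allclose(norm_loss_by_hand(y, Y, w, mask), NormLoss()(y, Y, w, mask))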
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 16 r0 = rindex % 16 r2 = rindex % 4 tmp0 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (r0 + 64 * tmp4), None) tmp7 = tmp6 * tmp6 tmp8 = tl.load(in_ptr1 + (16 + r0 + 64 * tmp4), None) tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = tl.load(in_ptr1 + (32 + r0 + 64 * tmp4), None) tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tl.load(in_ptr1 + (48 + r0 + 64 * tmp4), None) tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = 0.2 tmp19 = tmp17 * tmp18 tmp20 = tl.load(in_ptr2 + (r0 + 64 * tmp4), None) tmp21 = tmp20 * tmp20 tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * tmp4), None) tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tl.load(in_ptr2 + (32 + r0 + 64 * tmp4), None) tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tl.load(in_ptr2 + (48 + r0 + 64 * tmp4), None) tmp29 = tmp28 * tmp28 tmp30 = tmp27 + tmp29 tmp31 = libdevice.sqrt(tmp30) tmp32 = tmp31 * tmp18 tmp33 = tmp19 - tmp32 tmp34 = tmp33 * tmp33 tmp36 = tmp35 + tmp1 tmp37 = tmp35 < 0 tmp38 = tl.where(tmp37, tmp36, tmp35) tl.device_assert((0 <= tmp38) & (tmp38 < 4), 'index out of bounds: 0 <= tmp38 < 4') tmp40 = tl.load(in_ptr3 + tmp38, None, eviction_policy='evict_last') tmp41 = tmp40.to(tl.float32) tmp42 = tmp34 * tmp41 tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK]) tmp45 = tl.sum(tmp43, 1)[:, None] tmp46 = 64.0 tmp47 = tmp45 / tmp46 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp47, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4,), (1,)) assert_size_stride(arg3_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0[grid (1)](buf2, arg2_1, arg0_1, arg1_1, arg3_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf2, class NormLossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
acycliq/cellpose
NormLoss
false
12,045
[ "BSD-3-Clause" ]
0
6d7a3f692206bf791e3ea7bd9524ee6df628ed8a
https://github.com/acycliq/cellpose/tree/6d7a3f692206bf791e3ea7bd9524ee6df628ed8a
ArcCosDotLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/xe/cxegai7mxdqvp7c45meh6ibc2td5rt3rej7cesjfftxz23fjqrnu.py # Topologically Sorted Source Nodes: [mul, mul_1, dot, linalg_norm, linalg_norm_1, multiply, denom, truediv], Original ATen: [aten.mul, aten.add, aten.linalg_vector_norm, aten.div] # Source node to ATen node mapping: # denom => add # dot => add_1 # linalg_norm => pow_1, pow_2, sum_1 # linalg_norm_1 => pow_3, pow_4, sum_2 # mul => mul_1 # mul_1 => mul_2 # multiply => mul # truediv => div # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %select_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2.0), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1]), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %pow_4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-12), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add), kwargs = {}) triton_poi_fused_add_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp10 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp13 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp20 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp23 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp7 = tmp0 * tmp0 tmp8 = tmp3 * tmp3 tmp9 = tmp7 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = tmp1 * tmp1 tmp18 = tmp4 * tmp4 tmp19 = tmp17 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp16 * tmp26 tmp28 = 1e-12 tmp29 = tmp27 + tmp28 tmp30 = tmp6 / tmp29 tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hv/chvtcoxm7frhmguhikt7vsam67ob7husr35wjmlplzukw2ebujrt.py # Topologically Sorted Source Nodes: [clip, acos, phasediff, getitem_4, square, getitem_5, mul_2, mean], Original ATen: [aten.clamp, aten.acos, aten.div, aten.index, aten.pow, aten.mul, aten.mean] # Source node to ATen node mapping: # acos => acos # clip => clamp_max, clamp_min # getitem_4 => index # getitem_5 => index_1 # mean => mean # mul_2 => mul_3 # phasediff => div_1 # square => pow_5 # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, -0.999999), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 0.999999), kwargs = {}) # %acos : [num_users=1] = call_function[target=torch.ops.aten.acos.default](args = (%clamp_max,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%acos, 3.141549), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div_1, [%arg2_1]), kwargs = {}) # %pow_5 
: [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%index, 2), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg3_1, [%arg2_1]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_5, %index_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_3,), kwargs = {}) triton_per_fused_acos_clamp_div_index_mean_mul_pow_1 = async_compile.triton('triton_per_fused_acos_clamp_div_index_mean_mul_pow_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*i64', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_acos_clamp_div_index_mean_mul_pow_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_acos_clamp_div_index_mean_mul_pow_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = (rindex // 16) r3 = rindex % 16 r0 = rindex % 4 tmp0 = tl.load(in_ptr0 + (r2), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (r3 + (16*tmp4)), None) tmp7 = -0.999999 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = 0.999999 tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = libdevice.acos(tmp10) tmp12 = 0.3183143092786393 tmp13 = tmp11 * tmp12 tmp14 = tmp13 * tmp13 tmp16 = tmp15 + tmp1 tmp17 = tmp15 < 0 tmp18 = tl.where(tmp17, tmp16, tmp15) tl.device_assert((0 <= tmp18) & (tmp18 < 4), "index out of bounds: 0 <= tmp18 < 4") tmp20 = tl.load(in_ptr2 + (tmp18), None, eviction_policy='evict_last') tmp21 = tmp20.to(tl.float32) tmp22 = tmp14 * tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.sum(tmp23, 1)[:, None] tmp26 = 64.0 tmp27 = tmp25 / 
tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp27, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, ), (1, )) assert_size_stride(arg3_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, dot, linalg_norm, linalg_norm_1, multiply, denom, truediv], Original ATen: [aten.mul, aten.add, aten.linalg_vector_norm, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_mul_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [clip, acos, phasediff, getitem_4, square, getitem_5, mul_2, mean], Original ATen: [aten.clamp, aten.acos, aten.div, aten.index, aten.pow, aten.mul, aten.mean] triton_per_fused_acos_clamp_div_index_mean_mul_pow_1.run(buf2, arg2_1, buf0, arg3_1, 1, 64, grid=grid(1), stream=stream0) del arg2_1 del arg3_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) arg3_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class ArcCosDotLoss(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x, y, w, mask):
        eps = 1e-12
        denom = torch.multiply(torch.linalg.norm(x, dim=1),
                               torch.linalg.norm(y, dim=1)) + eps
        dot = x[:, 0, :, :] * y[:, 0, :, :] + x[:, 1, :, :] * y[:, 1, :, :]
        phasediff = torch.acos(torch.clip(dot / denom, -0.999999, 0.999999)
                               ) / 3.141549
        return torch.mean(torch.square(phasediff[mask]) * w[mask])


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
            torch.ones([4], dtype=torch.int64),
            torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {}]
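A clarifying sketch (mine, hedged): for two-channel vector fields, the clipped dot/denom term is the cosine of the angle between the per-pixel (channel-0, channel-1) vectors, so phasediff is that angle normalized to roughly [0, 1]. The near-pi divisor 3.141549 comes from the source and survives in the compiled kernel as the multiplier 0.3183143092786393 = 1 / 3.141549.

import torch

def phase_diff(x, y, eps=1e-12):
    # cosine between the (channel-0, channel-1) vectors at each pixel;
    # note the norms run over all of dim=1, exactly as in the module above
    cos = (x[:, 0] * y[:, 0] + x[:, 1] * y[:, 1]) / (
        torch.linalg.norm(x, dim=1) * torch.linalg.norm(y, dim=1) + eps)
    # clamp away the acos endpoints, then divide by the source's constant
    return torch.acos(cos.clamp(-0.999999, 0.999999)) / 3.141549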
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp20 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp7 = tmp0 * tmp0 tmp8 = tmp3 * tmp3 tmp9 = tmp7 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = tmp1 * tmp1 tmp18 = tmp4 * tmp4 tmp19 = tmp17 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp16 * tmp26 tmp28 = 1e-12 tmp29 = tmp27 + tmp28 tmp30 = tmp6 / tmp29 tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_per_fused_acos_clamp_div_index_mean_mul_pow_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex // 16 r3 = rindex % 16 r0 = rindex % 4 tmp0 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (r3 + 16 * tmp4), None) tmp7 = -0.999999 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = 0.999999 tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = libdevice.acos(tmp10) tmp12 = 0.3183143092786393 tmp13 = tmp11 * tmp12 tmp14 = tmp13 * tmp13 tmp16 = tmp15 + tmp1 tmp17 = tmp15 < 0 tmp18 = tl.where(tmp17, tmp16, tmp15) tl.device_assert((0 <= tmp18) & (tmp18 < 4), 'index out of bounds: 0 <= tmp18 < 4') tmp20 = tl.load(in_ptr2 + tmp18, None, eviction_policy='evict_last') tmp21 = tmp20.to(tl.float32) tmp22 = tmp14 * tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.sum(tmp23, 1)[:, None] tmp26 = 64.0 tmp27 = tmp25 / tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp27, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4,), (1,)) assert_size_stride(arg3_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_mul_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_acos_clamp_div_index_mean_mul_pow_1[grid(1)](buf2, arg2_1, buf0, arg3_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg2_1 del arg3_1 del buf0 return buf2, class ArcCosDotLossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
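A hedged sketch (assuming a CUDA device) of invoking the compiled wrapper above; the shapes and dtypes mirror what call() asserts and what get_inputs() supplies, since the second kernel consumes the last two arguments as integer indices.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
w = torch.ones(4, dtype=torch.int64, device='cuda')
mask = torch.ones(4, dtype=torch.int64, device='cuda')
loss = ArcCosDotLossNew()(x, y, w, mask)  # scalar float32 tensor on CUDA
print(loss.item())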
acycliq/cellpose
ArcCosDotLoss
false
12,046
[ "BSD-3-Clause" ]
0
6d7a3f692206bf791e3ea7bd9524ee6df628ed8a
https://github.com/acycliq/cellpose/tree/6d7a3f692206bf791e3ea7bd9524ee6df628ed8a
WeightedLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/6r/c6rqvlfgt5waxukwktqx3pc4aweb55rjsya2lqudhzw3iquoifk5.py # Topologically Sorted Source Nodes: [sub, diff, square, mul, mean], Original ATen: [aten.sub, aten.div, aten.pow, aten.mul, aten.mean] # Source node to ATen node mapping: # diff => div # mean => mean # mul => mul # square => pow_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 5.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %arg2_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) triton_per_fused_div_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_div_mean_mul_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': 
True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp6 = tl.load(in_ptr2 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = 0.2 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp7 = tmp5 * tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, diff, square, mul, mean], Original ATen: [aten.sub, aten.div, aten.pow, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_div_mean_mul_pow_sub_0.run(buf1, arg0_1, arg1_1, arg2_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class WeightedLoss(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, y, Y, w):
        diff = (y - Y) / 5.0
        return torch.mean(torch.square(diff) * w)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
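Because the loss is simply mean(((y - Y) / 5)^2 * w), an exact value can be checked by hand; a small sketch with constant tensors:

import torch

y = torch.full((4, 4, 4, 4), 6.0)
Y = torch.full((4, 4, 4, 4), 1.0)  # y - Y == 5, so diff == 1 everywhere
w = torch.full((4, 4, 4, 4), 2.0)
loss = WeightedLoss()(y, Y, w)
print(loss.item())  # 2.0, since mean(1.0 ** 2 * 2.0) == 2.0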
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = 0.2 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp7 = tmp5 * tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf1, class WeightedLossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
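A minimal parity check (assuming a CUDA device and that both classes are in scope): Inductor folds the division by 5.0 into a multiply by 0.2 inside the kernel, so the compiled wrapper should agree with the eager module up to rounding.

import torch

y = torch.rand(4, 4, 4, 4, device='cuda')
Y = torch.rand(4, 4, 4, 4, device='cuda')
w = torch.rand(4, 4, 4, 4, device='cuda')
eager = WeightedLoss()(y, Y, w)
compiled = WeightedLossNew()(y, Y, w)
print(torch.allclose(eager, compiled, atol=1e-6))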
acycliq/cellpose
WeightedLoss
false
12,047
[ "BSD-3-Clause" ]
0
6d7a3f692206bf791e3ea7bd9524ee6df628ed8a
https://github.com/acycliq/cellpose/tree/6d7a3f692206bf791e3ea7bd9524ee6df628ed8a
MyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ve/cve6hl7envvzbpav74dqxpj4bw72suixuf5ygqw4n5uy2ybxwewi.py # Topologically Sorted Source Nodes: [refactored_outputs_2, reshaped_nns_3, cost_tensor_new, add, max_1], Original ATen: [aten.repeat, aten.gather, aten.add, aten.max] # Source node to ATen node mapping: # add => add # cost_tensor_new => gather # max_1 => max_1 # refactored_outputs_2 => repeat_1 # reshaped_nns_3 => repeat # Graph fragment: # %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%permute_2, [4, 1, 1]), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%permute_1, [1, 4, 1]), kwargs = {}) # %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%repeat_1, 2, %repeat), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%gather, %view), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add, 1), kwargs = {}) triton_poi_fused_add_gather_max_repeat_0 = async_compile.triton('triton_poi_fused_add_gather_max_repeat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gather_max_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_gather_max_repeat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp9 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = libdevice.trunc(tmp0) tmp2 = tmp1.to(tl.int64) tmp3 = tl.full([XBLOCK], 4, tl.int32) tmp4 = tmp2 + tmp3 tmp5 = tmp2 < 0 tmp6 = tl.where(tmp5, tmp4, tmp2) tl.device_assert(((0 <= tmp6) & (tmp6 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp6 < 4") tmp8 = tl.load(in_ptr1 + (4*tmp6), xmask, eviction_policy='evict_last') tmp10 = tmp8 + tmp9 tmp11 = tl.load(in_ptr1 + (1 + (4*tmp6)), xmask, eviction_policy='evict_last') tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tl.load(in_ptr1 + (2 + (4*tmp6)), xmask, eviction_policy='evict_last') tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp14, tmp17) tmp19 = tl.load(in_ptr1 + (3 + (4*tmp6)), xmask, eviction_policy='evict_last') tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp18, tmp21) tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uo/cuobt3k2nfaorwyrp3kc6rc624fzg2onwoykgyakktndejpyqdqd.py # Topologically Sorted Source Nodes: [b, b_max, b_min], Original ATen: [aten.sum, aten.max, aten.min] # Source node to ATen node mapping: # b => sum_1 # b_max => max_2 # b_min => min_1 # Graph fragment: # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [0]), kwargs = {}) # %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%sum_1,), kwargs = {}) # %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.default](args = (%sum_1,), kwargs = {}) triton_per_fused_max_min_sum_1 = async_compile.triton('triton_per_fused_max_min_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_min_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_min_sum_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (4 + r0), None) tmp3 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = triton_helpers.max2(tmp7, 1)[:, None] tmp11 = triton_helpers.min2(tmp7, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3h/c3hg3x3f4jgkn4ifqxzl5dahdfk4hhv2xphl3ujvsa7wbauipe7z.py # Topologically Sorted Source Nodes: [b_1, truediv_1, add_2, add_3, sub_1, diff, cost, truediv_2], Original ATen: [aten.sub, aten.div, aten.mul, aten.mean, aten.rsub, aten.pow, aten.add] # Source node to ATen node mapping: # add_2 => mul # add_3 => mean_1 # b_1 => sub_2 # cost => add_1 # diff => pow_1 # sub_1 => sub_1 # truediv_1 => div_1 # truediv_2 => div_2 # Graph fragment: # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%max_2, %min_1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, 1.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %arg2_1), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (2, %mean_1), kwargs = {}) # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, %pow_1), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, 1.0), kwargs = {}) triton_per_fused_add_div_mean_mul_pow_rsub_sub_2 = async_compile.triton('triton_per_fused_add_div_mean_mul_pow_rsub_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=(7,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_pow_rsub_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_pow_rsub_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = (rindex // 4) r2 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (4*r0)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (r2), None) tmp11 = tl.load(in_ptr2 + (0)) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, 1]) tmp13 = tl.load(in_ptr3 + (0)) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = 2.0 tmp9 = tmp8 - tmp7 tmp10 = tmp9 * tmp9 tmp15 = tmp12 - tmp14 tmp16 = 1.0 tmp17 = tmp15 * tmp16 tmp18 = tmp17 + tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None) tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp18, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/yn/cynmu2p673p3mhzpyzhjnvyjat3d5nod67ahkoinsuuj5ijuzdda.py # Topologically Sorted Source Nodes: [booster_weights, booster_weights_1, booster_weights_2, booster_weights_3], Original ATen: [aten.mean, aten.rsub, aten.div, aten.clamp] # Source node to ATen node mapping: # booster_weights => mean # booster_weights_1 => sub # booster_weights_2 => div # booster_weights_3 => clamp_min # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%getitem, [0]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (2, %mean), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 2), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, 0.5), kwargs = {}) triton_poi_fused_clamp_div_mean_rsub_3 = async_compile.triton('triton_poi_fused_clamp_div_mean_rsub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_mean_rsub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_mean_rsub_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 2.0 tmp10 = tmp9 - tmp8 tmp11 = 0.5 tmp12 = tmp10 * tmp11 tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [refactored_outputs_2, reshaped_nns_3, cost_tensor_new, add, max_1], Original ATen: [aten.repeat, aten.gather, aten.add, aten.max] stream0 = get_raw_stream(0) triton_poi_fused_add_gather_max_repeat_0.run(arg0_1, arg1_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [b, b_max, b_min], Original ATen: [aten.sum, aten.max, aten.min] triton_per_fused_max_min_sum_1.run(arg1_1, buf1, buf2, 1, 4, grid=grid(1), stream=stream0) del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse buf6 = empty_strided_cuda((), (), torch.float32) buf7 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [b_1, truediv_1, add_2, add_3, sub_1, diff, cost, truediv_2], Original ATen: [aten.sub, aten.div, aten.mul, aten.mean, aten.rsub, aten.pow, aten.add] triton_per_fused_add_div_mean_mul_pow_rsub_sub_2.run(buf4, buf0, arg2_1, buf1, buf2, buf6, buf7, 1, 16, grid=grid(1), stream=stream0) del arg2_1 del buf1 del buf2 buf5 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [booster_weights, booster_weights_1, booster_weights_2, booster_weights_3], Original ATen: [aten.mean, aten.rsub, aten.div, aten.clamp] triton_poi_fused_clamp_div_mean_rsub_3.run(buf0, buf5, 4, grid=grid(4), stream=stream0) del buf0 return (buf6, buf4, buf7, buf5, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MyLoss(nn.Module):

    def __init__(self):
        super(MyLoss, self).__init__()
        None
        self.reduce_var = True
        pass
    """
    weights has shape (n), multiply loss of point i with weights[i]
    """

    def forward(self, outputs, y, weights, calculate_add=True):
        nns = torch.trunc(y)
        nns = nns.long()
        k = nns.shape[1]
        n = nns.shape[0]
        n = outputs.shape[0]
        batch_size = y.shape[0]
        n_bins = outputs.shape[1]
        diff = 0
        booster_weights = 0
        if calculate_add:
            reshaped_nns = torch.movedim(nns, 1, 0)
            del nns
            reshaped_nns = torch.unsqueeze(reshaped_nns, 2)
            reshaped_nns = torch.movedim(reshaped_nns, 1, 2)
            reshaped_nns = reshaped_nns.repeat(1, n_bins, 1)
            refactored_outputs = torch.unsqueeze(outputs, 0)
            refactored_outputs = torch.movedim(refactored_outputs, 1, 2)
            refactored_outputs = refactored_outputs.repeat(k, 1, 1)
            cost_tensor_new = torch.gather(refactored_outputs, 2, reshaped_nns)
            del reshaped_nns
            del refactored_outputs
            reshaped_outputs = torch.transpose(outputs, 0, 1)
            reshaped_outputs = torch.reshape(reshaped_outputs, (1, n_bins, n))
            reshaped_outputs = reshaped_outputs[:, :, :batch_size]
            add = cost_tensor_new + reshaped_outputs
            del reshaped_outputs
            del cost_tensor_new
            add, idx = torch.max(add, 1)
            del idx
            booster_weights = torch.mean(add, 0)
            booster_weights = 2 - booster_weights
            booster_weights = booster_weights / 2
            booster_weights = torch.clamp(booster_weights, min=0.5)
            add = add * weights
            add = torch.mean(add)
            diff = torch.square(2 - add)
        pass
        target_b = n / n_bins
        batch_outputs = outputs[:batch_size, :]
        b = torch.sum(batch_outputs, 0)
        b_max = torch.max(b)
        b_min = torch.min(b)
        b = b_max - b_min
        del batch_outputs
        cost = b / target_b + diff
        b = b.detach()
        diff = diff.detach()
        booster_weights = booster_weights.detach()
        return cost, diff, b / target_b, booster_weights
    pass


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
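A usage sketch with the (4, 4) inputs that get_inputs() supplies; forward returns a 4-tuple, and the last three values are detached from the autograd graph.

import torch

outputs = torch.rand(4, 4)
y = torch.rand(4, 4)
weights = torch.rand(4, 4)
cost, diff, b_ratio, booster_weights = MyLoss()(outputs, y, weights)
print(cost.item(), booster_weights.shape)  # scalar cost; booster weights of shape (4,)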
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_gather_max_repeat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp9 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = libdevice.trunc(tmp0) tmp2 = tmp1.to(tl.int64) tmp3 = tl.full([XBLOCK], 4, tl.int32) tmp4 = tmp2 + tmp3 tmp5 = tmp2 < 0 tmp6 = tl.where(tmp5, tmp4, tmp2) tl.device_assert((0 <= tmp6) & (tmp6 < 4) | ~xmask, 'index out of bounds: 0 <= tmp6 < 4') tmp8 = tl.load(in_ptr1 + 4 * tmp6, xmask, eviction_policy='evict_last') tmp10 = tmp8 + tmp9 tmp11 = tl.load(in_ptr1 + (1 + 4 * tmp6), xmask, eviction_policy= 'evict_last') tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tl.load(in_ptr1 + (2 + 4 * tmp6), xmask, eviction_policy= 'evict_last') tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp14, tmp17) tmp19 = tl.load(in_ptr1 + (3 + 4 * tmp6), xmask, eviction_policy= 'evict_last') tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp18, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_per_fused_max_min_sum_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (4 + r0), None) tmp3 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = triton_helpers.max2(tmp7, 1)[:, None] tmp11 = triton_helpers.min2(tmp7, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) @triton.jit def triton_per_fused_add_div_mean_mul_pow_rsub_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = rindex // 4 r2 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + r2, None) tmp11 = tl.load(in_ptr2 + 0) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, 1]) tmp13 = tl.load(in_ptr3 + 0) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = 2.0 tmp9 = tmp8 - 
tmp7 tmp10 = tmp9 * tmp9 tmp15 = tmp12 - tmp14 tmp16 = 1.0 tmp17 = tmp15 * tmp16 tmp18 = tmp17 + tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None) tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None) @triton.jit def triton_poi_fused_clamp_div_mean_rsub_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 2.0 tmp10 = tmp9 - tmp8 tmp11 = 0.5 tmp12 = tmp10 * tmp11 tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_add_gather_max_repeat_0[grid(16)](arg0_1, arg1_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) triton_per_fused_max_min_sum_1[grid(1)](arg1_1, buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = empty_strided_cuda((), (), torch.float32) triton_per_fused_add_div_mean_mul_pow_rsub_sub_2[grid(1)](buf4, buf0, arg2_1, buf1, buf2, buf6, buf7, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg2_1 del buf1 del buf2 buf5 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_clamp_div_mean_rsub_3[grid(4)](buf0, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf0 return buf6, buf4, buf7, buf5 class MyLossNew(nn.Module): def __init__(self): super(MyLossNew, self).__init__() None self.reduce_var = True pass """ weights has shape (n), multiply loss of point i with weights[i] """ pass def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1], output[2], output[3]
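As with the eager module, the compiled wrapper returns four tensors; a hedged sketch assuming a CUDA device:

import torch

outputs = torch.rand(4, 4, device='cuda')
y = torch.rand(4, 4, device='cuda')
weights = torch.rand(4, 4, device='cuda')
cost, diff, b_ratio, booster_weights = MyLossNew()(outputs, y, weights)
print(cost.item())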
abrar-fahim/ann-benchmarks
MyLoss
false
12,048
[ "MIT" ]
0
e5493ddda333bf6a930415566d4f1c697b439aca
https://github.com/abrar-fahim/ann-benchmarks/tree/e5493ddda333bf6a930415566d4f1c697b439aca
LocalizationNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # out => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, 
None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gk/cgkeox2n6lb2433ix673nzbipppptbujeertkjua6x5dbykrv7hm.py # Topologically Sorted Source Nodes: [conv2d_1, out_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # out_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gq/cgqvpghyzwkbypvbvlyig6ohuplw6qvhn73hkwj6auj5e2m5mqio.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_3 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/d4/cd4s5ogbgu46xbdaa3oicwxi7l6pnddrap26pxiqzcpei77ta53h.py # Topologically Sorted Source Nodes: [conv2d_2, out_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # out_4 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/a4/ca43wvja2n3mesrfuj54dcwx324bk23dhpnatmpi7kjryanvrx2z.py # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_5 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pw/cpwsgxngvwi42czirdy5mqcvlzqz5ddbdn3ytrocy4pgt7bp7hcr.py # Topologically Sorted Source Nodes: [conv2d_3, out_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # out_6 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/sd/csdggutbpcmdsc77fz7wxfdqrm5gyg476bvq6mjhalojp2rgwf5a.py # Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_7 => _low_memory_max_pool2d_with_offsets_3, getitem_7 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = 
tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7y/c7y7zlebmc5t5uuru343xxcvwljsadr3kaif7u7htmtl3dnir7z7.py # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.relu] # Source node to ATen node mapping: # out_9 => relu_4 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_8 = async_compile.triton('triton_poi_fused_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (1024, 1024), (1024, 1)) assert_size_stride(primals_11, (1024, ), (1, )) assert_size_stride(primals_12, (4, 1024), (1024, 1)) 
assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, out_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 262144, grid=grid(262144), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 16, 16), (16384, 256, 16, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_2, out_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 65536, grid=grid(65536), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.float32) buf11 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 16384, grid=grid(16384), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 8, 8), (4096, 64, 8, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv2d_3, out_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf13, primals_9, 16384, grid=grid(16384), stream=stream0) del primals_9 buf14 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.int8) buf15 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32) # 
Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_7.run(buf13, buf14, buf15, 4096, grid=grid(4096), stream=stream0) buf16 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 1024), (1, 1024), 0), out=buf16) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.relu] triton_poi_fused_relu_8.run(buf17, primals_11, 4096, grid=grid(4096), stream=stream0) del primals_11 buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf17, reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf18) del primals_13 return (reinterpret_tensor(buf18, (4, 2, 2), (4, 2, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0), buf17, primals_12, primals_10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
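The fused max-pool kernels above store, for every 2x2 window, both the window maximum and an int8 code in {0, 1, 2, 3} identifying which of the four candidates won; the backward pass uses those codes to route gradients. A minimal eager-mode sketch of the same bookkeeping (illustrative only, not part of the generated file); F.max_pool2d with return_indices=True is the closest public equivalent, though it returns flat int64 indices rather than per-window offsets:

import torch
import torch.nn.functional as F

# Tiny demonstration input: one 4x4 plane, so 2x2 pooling yields a 2x2 output.
x = torch.arange(16.0).reshape(1, 1, 4, 4)
values, indices = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
print(values)   # maximum of each 2x2 window
print(indices)  # flat index of each maximum within the input plane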
import torch import torch.nn.functional as F from torch import nn class LocalizationNet(nn.Module): def __init__(self, num_bbox=2, num_digits=2): super(LocalizationNet, self).__init__() self.conv1 = nn.Conv2d(1, 64, 3, padding=1) self.conv2 = nn.Conv2d(64, 64, 3, padding=1) self.conv3 = nn.Conv2d(64, 64, 3, padding=1) self.conv4 = nn.Conv2d(64, 64, 3, padding=1) self.fc1 = nn.Linear(64 * 4 * 4, 1024) self.dropout = nn.Dropout(0.1) self.fc2 = nn.Linear(1024, num_bbox * num_digits) def forward(self, out): out = F.relu(self.conv1(out)) out = F.max_pool2d(out, 2) out = F.relu(self.conv2(out)) out = F.max_pool2d(out, 2) out = F.relu(self.conv3(out)) out = F.max_pool2d(out, 2) out = F.relu(self.conv4(out)) out = F.max_pool2d(out, 2) out = out.view(out.size()[0], -1) out = F.relu(self.dropout(self.fc1(out))) out = self.fc2(out) out = torch.reshape(out, tuple(out.size()[:-1]) + (2, 2)) return out def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
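A minimal smoke test for the eager module above (a sketch, not part of the original file): with the default num_bbox=2 and num_digits=2, fc2 emits four numbers per image, which forward reshapes into a (batch, 2, 2) coordinate tensor.

import torch

model = LocalizationNet(num_bbox=2, num_digits=2).eval()  # eval() disables dropout
with torch.no_grad():
    out = model(torch.rand(4, 1, 64, 64))
print(out.shape)  # torch.Size([4, 2, 2])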
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, 
tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy ='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (1024, 1024), (1024, 1)) assert_size_stride(primals_11, (1024,), (1,)) assert_size_stride(primals_12, (4, 1024), (1024, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf1, buf2, buf3, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(262144)](buf5, primals_5, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(65536)](buf5, buf6, buf7, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 16, 16), (16384, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(65536)](buf9, primals_7, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch. 
float32) buf11 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf9, buf10, buf11, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 8, 8), (4096, 64, 8, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_6[grid(16384)](buf13, primals_9, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf14 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.int8) buf15 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch. float32) triton_poi_fused_max_pool2d_with_indices_7[grid(4096)](buf13, buf14, buf15, 4096, XBLOCK=256, num_warps=4, num_stages=1) buf16 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0 ), reinterpret_tensor(primals_10, (1024, 1024), (1, 1024), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_relu_8[grid(4096)](buf17, primals_11, 4096, XBLOCK =256, num_warps=4, num_stages=1) del primals_11 buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, buf17, reinterpret_tensor( primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf18) del primals_13 return (reinterpret_tensor(buf18, (4, 2, 2), (4, 2, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, reinterpret_tensor( buf15, (4, 1024), (1024, 1), 0), buf17, primals_12, primals_10) class LocalizationNetNew(nn.Module): def __init__(self, num_bbox=2, num_digits=2): super(LocalizationNetNew, self).__init__() self.conv1 = nn.Conv2d(1, 64, 3, padding=1) self.conv2 = nn.Conv2d(64, 64, 3, padding=1) self.conv3 = nn.Conv2d(64, 64, 3, padding=1) self.conv4 = nn.Conv2d(64, 64, 3, padding=1) self.fc1 = nn.Linear(64 * 4 * 4, 1024) self.dropout = nn.Dropout(0.1) self.fc2 = nn.Linear(1024, num_bbox * num_digits) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.fc1.weight primals_11 = self.fc1.bias primals_12 = self.fc2.weight primals_13 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
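A hedged parity check between the eager module and the Triton-backed rewrite above (a sketch assuming a CUDA device; the compiled graph omits dropout, so the eager model must be in eval mode for the outputs to match):

import torch

torch.manual_seed(0)
eager = LocalizationNet().cuda().eval()
fused = LocalizationNetNew().cuda().eval()
fused.load_state_dict(eager.state_dict())  # both classes use the same parameter names
x = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(fused(x), eager(x))  # assumes CUDA is available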
YIFEI-MA/MultiDigitRecognition
LocalizationNet
false
12,049
[ "MIT" ]
0
f1f9567c31102ccdc7464a35b8a7c533b5d46734
https://github.com/YIFEI-MA/MultiDigitRecognition/tree/f1f9567c31102ccdc7464a35b8a7c533b5d46734
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/cx/ccxj5mx36tl2zezvd7cfl4qjbxj2iebjymfhg7g4wimudrlvd4ab.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_4], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 132 x1 = (xindex // 132) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((128*x1) + x0), tmp4 
& xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 132, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-128) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mt/cmttmov7q7l6eww5wgel4xbdmlbbf53sgwydh2ovfk4ks65mt3ki.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu_1 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_6), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/j3/cj3skkmeof32bawbrph6liql2ib2zomhrw6fu7ygx6aiswlahhrr.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_2 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = 
{}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/nd/cndyd4t5wyyfbodpgmx4s5bci3qcjt2hdvlzr44yh2pq2vkofyng.py # Topologically Sorted Source Nodes: [xs], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # xs => relu # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (64, 132), (132, 1)) assert_size_stride(primals_6, (64, ), (1, )) assert_size_stride(primals_7, (32, 64), (64, 1)) assert_size_stride(primals_8, (32, ), (1, )) assert_size_stride(primals_9, (1, 32), (32, 1)) assert_size_stride(primals_10, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 132), (132, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_2, primals_4, buf1, 528, grid=grid(528), stream=stream0) del primals_4 buf2 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (132, 64), (1, 132), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_6, 256, grid=grid(256), stream=stream0) del primals_6 buf4 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf3, reinterpret_tensor(primals_7, (64, 32), (1, 64), 0), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf5, primals_8, 128, grid=grid(128), stream=stream0) del primals_8 buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, buf5, reinterpret_tensor(primals_9, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf7) del primals_10 buf8 = empty_strided_cuda((4, 128), (128, 1), torch.bool) # Topologically Sorted Source Nodes: [xs], Original 
ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf0, primals_2, buf8, 512, grid=grid(512), stream=stream0) del buf0 del primals_2 return (buf7, primals_3, buf1, buf3, buf5, primals_9, primals_7, primals_5, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, 132), (132, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
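triton_poi_fused_cat_0 above folds three eager ops into one pass over a (4, 132) buffer: the bias add of the first linear layer, its ReLU, and the concatenation with the 4-wide action tensor. An eager-mode sketch of the same computation (the tensor names here are hypothetical stand-ins for the buffers in the generated code):

import torch
import torch.nn.functional as F

state_proj = torch.rand(4, 128)  # stands in for state @ fcs1.weight.T (buf0)
fcs1_bias = torch.rand(128)      # stands in for primals_2
action = torch.rand(4, 4)        # stands in for primals_4
x = torch.cat((F.relu(state_proj + fcs1_bias), action), dim=1)
print(x.shape)  # torch.Size([4, 132]), matching buf1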
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=128, fc2_units=64, fc3_units=32): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, fc3_units) self.fc4 = nn.Linear(fc3_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(*hidden_init(self.fc3)) self.fc4.weight.data.uniform_(-0.003, 0.003) def forward(self, state, action): """Build a critic (value) network that maps (state, action) pairs -> Q-values.""" xs = F.relu(self.fcs1(state)) x = torch.cat((xs, action), dim=1) x = F.relu(self.fc2(x)) x = F.relu(self.fc3(x)) return self.fc4(x) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
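A minimal usage sketch for the eager critic above (not part of the original file): it maps a batch of (state, action) pairs to one Q-value estimate each.

import torch

critic = Critic(state_size=4, action_size=4, seed=4)
state = torch.rand(4, 4)
action = torch.rand(4, 4)
q = critic(state, action)
print(q.shape)  # torch.Size([4, 1])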
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 132 x1 = xindex // 132 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (128 * x1 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 132, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-128 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (64, 132), (132, 1)) assert_size_stride(primals_6, (64,), (1,)) assert_size_stride(primals_7, (32, 64), (64, 1)) assert_size_stride(primals_8, (32,), (1,)) assert_size_stride(primals_9, (1, 32), (32, 1)) 
assert_size_stride(primals_10, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 132), (132, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(528)](buf0, primals_2, primals_4, buf1, 528, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 64), (64, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (132, 64), (1, 132), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(256)](buf3, primals_6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_7, (64, 32), (1, 64), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(128)](buf5, primals_8, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_10, buf5, reinterpret_tensor(primals_9, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf7) del primals_10 buf8 = empty_strided_cuda((4, 128), (128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(512)](buf0, primals_2, buf8, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return (buf7, primals_3, buf1, buf3, buf5, primals_9, primals_7, primals_5, buf8) def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class CriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=128, fc2_units=64, fc3_units=32): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(CriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, fc3_units) self.fc4 = nn.Linear(fc3_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(*hidden_init(self.fc3)) self.fc4.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.fcs1.weight primals_2 = self.fcs1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_9 = self.fc4.weight primals_10 = self.fc4.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
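A hedged parity check between Critic and the fused CriticNew above (a sketch assuming a CUDA device; copying the state dict removes any dependence on the seeded initialization):

import torch

eager = Critic(4, 4, seed=0).cuda()
fused = CriticNew(4, 4, seed=0).cuda()
fused.load_state_dict(eager.state_dict())
s = torch.rand(4, 4, device='cuda')
a = torch.rand(4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(fused(s, a), eager(s, a))  # assumes CUDA is available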
adriaciurana/adriaciurana-udacity-project-2
Critic
false
12,050
[ "MIT" ]
0
a0af7086df586b537cd10a880f1d354240ff31a5
https://github.com/adriaciurana/adriaciurana-udacity-project-2/tree/a0af7086df586b537cd10a880f1d354240ff31a5
GraphConvolution
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/l4/cl4boort6vfsvh6h6bfd4lck36kbmtipkqcrnhckuuxer6sfib77.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.zeros] # Source node to ATen node mapping: # output => full # Graph fragment: # %full : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_zeros_0 = async_compile.triton('triton_poi_fused_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ea/ceayeuekgepm4woy7dpyqohzstm5nlxvbikhx3vedydg6yb7scqe.py # 
Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add, aten.transpose] # Source node to ATen node mapping: # output_1 => add, permute_5 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_3, %primals_4), kwargs = {}) # %permute_5 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%permute_4, [1, 0]), kwargs = {}) triton_poi_fused_add_transpose_1 = async_compile.triton('triton_poi_fused_add_transpose_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_transpose_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_transpose_1(in_ptr0, in_ptr1, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr1 + (y0 + (4*x1)), tmp2, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm] extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.zeros] stream0 = get_raw_stream(0) triton_poi_fused_zeros_0.run(buf1, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [output], Original ATen: [aten._sparse_addmm] buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf1, (4, 4), (1, 4), 0), 
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), beta=0) del buf0 buf3 = buf2 del buf2 buf5 = reinterpret_tensor(buf1, (4, 4), (1, 4), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add, aten.transpose] triton_poi_fused_add_transpose_1.run(buf3, primals_4, buf5, 4, 4, grid=grid(4, 4), stream=stream0) del buf3 del primals_4 return (buf5, primals_3, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.init as init


class GraphConvolution(nn.Module):

    def __init__(self, input_dim, output_dim, use_bias=True):
        """
        Graph convolution: L*X*theta
        :param input_dim: int, dimension of the input node features
        :param output_dim: int, dimension of the output features
        :param use_bias: boolean | optional, whether to use a bias term
        """
        super(GraphConvolution, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.use_bias = use_bias
        self.weight = nn.Parameter(torch.Tensor(input_dim, output_dim))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(output_dim))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        init.kaiming_uniform_(self.weight)
        if self.use_bias:
            init.zeros_(self.bias)

    def forward(self, adjacency, input_feature):
        """
        The adjacency matrix is sparse, so sparse matrix multiplication
        is used in the computation.
        :param adjacency: adjacency matrix
        :param input_feature: input features
        :return: output features
        """
        support = torch.mm(input_feature, self.weight)
        output = torch.sparse.mm(adjacency, support)
        if self.use_bias:
            output += self.bias
        return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.input_dim
            ) + ' -> ' + str(self.output_dim) + ')'


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'output_dim': 4}]
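A minimal usage sketch for the module above (illustrative, not part of the source repository): it builds the sparse COO adjacency that the docstring assumes and runs one forward pass with the shapes from get_init_inputs().

import torch

gcn = GraphConvolution(input_dim=4, output_dim=4)

# Sparse COO adjacency for a 4-node graph (self-loops only, for brevity);
# torch.sparse.mm in forward() takes the sparse operand first.
indices = torch.tensor([[0, 1, 2, 3], [0, 1, 2, 3]])
adjacency = torch.sparse_coo_tensor(indices, torch.ones(4), (4, 4))

features = torch.rand(4, 4)
output = gcn(adjacency, features)
print(output.shape)  # torch.Size([4, 4])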
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init as init

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fills the 4x4 fp32 accumulator with zeros (the beta=0 addmm input).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_transpose_1(in_ptr0, in_ptr1, out_ptr1, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Adds the bias along the output-feature axis (x1) while writing the
    # transposed addmm result back into the final output layout.
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr1 + (y0 + 4 * x1), tmp2, xmask & ymask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # support = input_feature @ weight
        extern_kernels.mm(primals_2, primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_zeros_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=
            1, num_stages=1)
        # output^T = support^T @ adjacency^T, i.e. output = adjacency @ support
        buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf1,
            (4, 4), (1, 4), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
            reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), beta=0)
        del buf0
        buf3 = buf2
        del buf2
        buf5 = reinterpret_tensor(buf1, (4, 4), (1, 4), 0)
        del buf1
        triton_poi_fused_add_transpose_1[grid(4, 4)](buf3, primals_4, buf5,
            4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
        del buf3
        del primals_4
    return buf5, primals_3, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)


class GraphConvolutionNew(nn.Module):

    def __init__(self, input_dim, output_dim, use_bias=True):
        """
        Graph convolution: L*X*theta
        :param input_dim: int, dimension of the input node features
        :param output_dim: int, dimension of the output features
        :param use_bias: boolean | optional, whether to use a bias term
        """
        super(GraphConvolutionNew, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.use_bias = use_bias
        self.weight = nn.Parameter(torch.Tensor(input_dim, output_dim))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(output_dim))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        init.kaiming_uniform_(self.weight)
        if self.use_bias:
            init.zeros_(self.bias)

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.input_dim
            ) + ' -> ' + str(self.output_dim) + ')'

    def forward(self, input_0, input_1):
        primals_1 = self.weight
        primals_4 = self.bias
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
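A minimal parity check between the eager module and its compiled wrapper (a sketch under two assumptions: a CUDA device is available, since call() pins every buffer to cuda:0, and torch.sparse.mm accepts the dense adjacency used here, which benchmark_compiled_module's dense random inputs already rely on):

import torch

eager = GraphConvolution(4, 4).cuda()
compiled = GraphConvolutionNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # share weight and bias

adjacency = torch.rand(4, 4, device='cuda')
features = torch.rand(4, 4, device='cuda')

# The eager signature is forward(adjacency, input_feature); the compiled
# wrapper maps input_0 to primals_2 (the operand of the weight matmul),
# so the feature matrix comes first there.
ref = eager(adjacency, features)
out = compiled(features, adjacency)
assert torch.allclose(ref, out, atol=1e-5)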
acproject/knowledge-graph-learning
GraphConvolution
false
12051
[ "MIT" ]
0
fa62db6720f6da8e35de01b68acf82f1a367671f
https://github.com/acproject/knowledge-graph-learning/tree/fa62db6720f6da8e35de01b68acf82f1a367671f
eSEModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean] # Source node to ATen node mapping: # x => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + 
(r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/co/ccowauib7vy5gm7p4vj4ao45thoxwz7gg4fphlxqgn5idb7igf7i.py # Topologically Sorted Source Nodes: [x_1, add, relu6, x_2, mul], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # relu6 => clamp_max, clamp_min # x_1 => convolution # x_2 => div # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, 3.0), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %div), kwargs = {}) triton_poi_fused_add_convolution_div_hardtanh_mul_1 = async_compile.triton('triton_poi_fused_add_convolution_div_hardtanh_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_hardtanh_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_div_hardtanh_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 16) x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 3.0 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = 
triton_helpers.maximum(tmp5, tmp6) tmp8 = 6.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tmp10 = 0.16666666666666666 tmp11 = tmp9 * tmp10 tmp12 = tmp0 * tmp11 tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jf/cjfxg4qmkc47we3c3cd47pg6lk6t4idzjkbdf3zyoyk4nr3a7ktb.py # Topologically Sorted Source Nodes: [x_1, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward] # Source node to ATen node mapping: # add => add # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, 3.0), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%add, 0), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add, 6), kwargs = {}) # %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le, %ge), kwargs = {}) triton_poi_fused_add_convolution_hardtanh_backward_2 = async_compile.triton('triton_poi_fused_add_convolution_hardtanh_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_hardtanh_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_hardtanh_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 3.0 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tmp7 = 6.0 tmp8 = tmp4 >= tmp7 tmp9 = tmp6 | tmp8 tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, ), (1, 
)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, add, relu6, x_2, mul], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul] triton_poi_fused_add_convolution_div_hardtanh_mul_1.run(primals_1, buf2, primals_3, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward] triton_poi_fused_add_convolution_hardtanh_backward_2.run(buf2, primals_3, buf4, 16, grid=grid(16), stream=stream0) del buf2 del primals_3 return (buf3, primals_1, primals_2, buf1, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F import torch.nn.parallel class Hsigmoid(nn.Module): def __init__(self, inplace=True): super(Hsigmoid, self).__init__() self.inplace = inplace def forward(self, x): return F.relu6(x + 3.0, inplace=self.inplace) / 6.0 class eSEModule(nn.Module): def __init__(self, channel, reduction=4): super(eSEModule, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0) self.hsigmoid = Hsigmoid() def forward(self, x): input = x x = self.avg_pool(x) x = self.fc(x) x = self.hsigmoid(x) return input * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
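A minimal usage sketch (illustrative, not part of the source repository): it runs the eager module and re-derives the channel gate by hand as relu6(fc(avgpool(x)) + 3) / 6. Note that reduction is accepted but never used: the 1x1 conv keeps the full channel width, which is what distinguishes this "effective" SE variant from a standard squeeze-and-excitation block.

import torch

se = eSEModule(channel=4)
x = torch.rand(4, 4, 4, 4)
out = se(x)

# Re-derive the gate by hand: hard sigmoid of the 1x1 conv applied to
# the per-channel spatial mean, then broadcast-multiplied onto x.
gate = torch.clamp(se.fc(x.mean(dim=(-1, -2), keepdim=True)) + 3.0,
    0.0, 6.0) / 6.0
assert torch.allclose(out, x * gate, atol=1e-6)
print(out.shape)  # torch.Size([4, 4, 4, 4])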
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.functional as F import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_convolution_div_hardtanh_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 3.0 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = 6.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tmp10 = 0.16666666666666666 tmp11 = tmp9 * tmp10 tmp12 = tmp0 * tmp11 tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_add_convolution_hardtanh_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 3.0 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tmp7 = 6.0 tmp8 = tmp4 >= tmp7 tmp9 = tmp6 | tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_convolution_div_hardtanh_mul_1[grid(256)]( primals_1, buf2, primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) triton_poi_fused_add_convolution_hardtanh_backward_2[grid(16)](buf2, 
primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 del primals_3 return buf3, primals_1, primals_2, buf1, buf4 class Hsigmoid(nn.Module): def __init__(self, inplace=True): super(Hsigmoid, self).__init__() self.inplace = inplace def forward(self, x): return F.relu6(x + 3.0, inplace=self.inplace) / 6.0 class eSEModuleNew(nn.Module): def __init__(self, channel, reduction=4): super(eSEModuleNew, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0) self.hsigmoid = Hsigmoid() def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
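A minimal parity check for the compiled wrapper above (a sketch assuming a CUDA device, since call() pins all buffers to cuda:0):

import torch

eager = eSEModule(channel=4).cuda()
compiled = eSEModuleNew(channel=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the 1x1 conv weights

x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-5)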
XDong18/AdelaiDet
eSEModule
false
12052
[ "BSD-2-Clause" ]
0
837cd1078923892fe6e84ac29fd0963f1b2c474f
https://github.com/XDong18/AdelaiDet/tree/837cd1078923892fe6e84ac29fd0963f1b2c474f
SingleBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/hs/chsecdba4f7bovvhq2kaknnp2qju7ddv6wkb4gfainbyo4cqbbia.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_23 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 
(x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr1 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/32/c32ey56u44hrliqc4irsqofvv4b2jn2n7mjjlw7pc6izjedkueke.py # Topologically Sorted Source Nodes: [v_trans_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # v_trans_1 => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 12 x1 = (xindex // 12) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rc/crcy3ekcmk55inse2exukl7ikkuzgffylu2d27ocqet76u3yr4cc.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] # Source node to ATen node mapping: # matmul => bmm # Graph fragment: # %bmm : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand, %expand_1), kwargs = {}) triton_poi_fused_bmm_2 = async_compile.triton('triton_poi_fused_bmm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4 + (12*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hr/chra4rrpxnuwkofeqtn26wi6b5664cbswl4lakiekmo4l2cxx5pa.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] # Source node to ATen node mapping: # matmul => bmm # Graph fragment: # %bmm : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand, %expand_1), kwargs = {}) triton_poi_fused_bmm_3 = async_compile.triton('triton_poi_fused_bmm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (12*x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6i/c6i3d7y6walwzssraacw7xljfp6l4qtclc7ye65ufzoallqmst7v.py # Topologically Sorted Source Nodes: 
[matmul_4], Original ATen: [aten.bmm] # Source node to ATen node mapping: # matmul_4 => bmm_4 # Graph fragment: # %bmm_4 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_10, %expand_11), kwargs = {}) triton_poi_fused_bmm_4 = async_compile.triton('triton_poi_fused_bmm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (5 + (12*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/wt/cwtpuvah7osg2oqg6zwrgkrkfb7omhxymlcnm64ybtuijjpltava.py # Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm] # Source node to ATen node mapping: # matmul_4 => bmm_4 # Graph fragment: # %bmm_4 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_10, %expand_11), kwargs = {}) triton_poi_fused_bmm_5 = async_compile.triton('triton_poi_fused_bmm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1 + (12*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bq/cbqkaltuvwowq3s6vzdjqepk4mbcahi7etep6rqnzr5yt2net2nh.py # Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm] # Source node to ATen node mapping: # matmul_8 => bmm_8 # Graph fragment: # %bmm_8 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_20, %expand_21), kwargs = {}) triton_poi_fused_bmm_6 = async_compile.triton('triton_poi_fused_bmm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (6 + (12*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/wf/cwfnaxwqjkpl3cze4kw7i3islayjbzsmkutn3auz4mw6luxbjyet.py # Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm] # Source node to ATen node mapping: # matmul_8 => bmm_8 # Graph fragment: # %bmm_8 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_20, %expand_21), kwargs = {}) triton_poi_fused_bmm_7 = async_compile.triton('triton_poi_fused_bmm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + (12*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/nt/cntnztpksr7tdizfs3rzmdupgs3yndactjr6s7takeqpny77yq4f.py # Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.bmm] # Source node to ATen node mapping: # matmul_12 => bmm_12 # Graph fragment: # %bmm_12 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_30, %expand_31), kwargs = {}) triton_poi_fused_bmm_8 = async_compile.triton('triton_poi_fused_bmm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = 
xindex tmp0 = tl.load(in_ptr0 + (7 + (12*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vz/cvzxtlhcjoc7snrvho4qrnmeuxgol47ildzoks2quun4bxgj4o5v.py # Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.bmm] # Source node to ATen node mapping: # matmul_12 => bmm_12 # Graph fragment: # %bmm_12 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_30, %expand_31), kwargs = {}) triton_poi_fused_bmm_9 = async_compile.triton('triton_poi_fused_bmm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3 + (12*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mh/cmhyeem2ju73nvdgvgmak2jga27otpmletat4tlilg7lkg3wwlj3.py # Topologically Sorted Source Nodes: [eq, masked_fill, interMAF_q2v, masked_fill_2, interMAF_q2v_1, masked_fill_4, interMAF_q2v_2, masked_fill_6, interMAF_q2v_3], Original ATen: [aten.eq, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # eq => eq # interMAF_q2v => exp, sum_1 # interMAF_q2v_1 => exp_2, sum_3 # interMAF_q2v_2 => exp_4, sum_5 # interMAF_q2v_3 => exp_6, sum_7 # masked_fill => full_default, where # masked_fill_2 => where_2 # masked_fill_4 => where_4 # masked_fill_6 => where_6 # Graph fragment: # %eq : [num_users=32] = call_function[target=torch.ops.aten.eq.Scalar](args = (%expand_2, 0), kwargs = {}) # %full_default : [num_users=64] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %bmm), kwargs = {}) # %mul_tensor_63 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1), kwargs = {}) # %amax_default_63 : 
[num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_63, [2], True), kwargs = {}) # %sub_tensor_63 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_63, %amax_default_63), kwargs = {}) # %div_tensor_63 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_63, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_63,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %bmm_4), kwargs = {}) # %mul_tensor_61 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_2, 1), kwargs = {}) # %amax_default_61 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_61, [2], True), kwargs = {}) # %sub_tensor_61 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_61, %amax_default_61), kwargs = {}) # %div_tensor_61 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_61, 1.0), kwargs = {}) # %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_61,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [2], True), kwargs = {}) # %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %bmm_8), kwargs = {}) # %mul_tensor_59 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_4, 1), kwargs = {}) # %amax_default_59 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_59, [2], True), kwargs = {}) # %sub_tensor_59 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_59, %amax_default_59), kwargs = {}) # %div_tensor_59 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_59, 1.0), kwargs = {}) # %exp_4 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_59,), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_4, [2], True), kwargs = {}) # %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %bmm_12), kwargs = {}) # %mul_tensor_57 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_6, 1), kwargs = {}) # %amax_default_57 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_57, [2], True), kwargs = {}) # %sub_tensor_57 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_57, %amax_default_57), kwargs = {}) # %div_tensor_57 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_57, 1.0), kwargs = {}) # %exp_6 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_57,), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_6, [2], True), kwargs = {}) triton_poi_fused__softmax_eq_masked_fill_10 = async_compile.triton('triton_poi_fused__softmax_eq_masked_fill_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_masked_fill_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_eq_masked_fill_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr2 + (4*x2), xmask, eviction_policy='evict_last') tmp44 = tl.load(in_ptr2 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp52 = tl.load(in_ptr2 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp71 = tl.load(in_ptr3 + (4*x2), xmask, eviction_policy='evict_last') tmp74 = tl.load(in_ptr3 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp78 = tl.load(in_ptr3 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp82 = tl.load(in_ptr3 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp101 = tl.load(in_ptr4 + (4*x2), xmask, eviction_policy='evict_last') tmp104 = tl.load(in_ptr4 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp108 = tl.load(in_ptr4 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp112 = tl.load(in_ptr4 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = -1000000000.0 tmp5 = tl.where(tmp2, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp9 = tmp8 == tmp1 tmp11 = tl.where(tmp9, tmp4, tmp10) tmp12 = tmp11 * tmp6 tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp15 = tmp14 == tmp1 tmp17 = tl.where(tmp15, 
tmp4, tmp16) tmp18 = tmp17 * tmp6 tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp21 = tmp20 == tmp1 tmp23 = tl.where(tmp21, tmp4, tmp22) tmp24 = tmp23 * tmp6 tmp25 = triton_helpers.maximum(tmp19, tmp24) tmp26 = tmp7 - tmp25 tmp27 = tmp26 * tmp6 tmp28 = tl_math.exp(tmp27) tmp29 = tmp12 - tmp25 tmp30 = tmp29 * tmp6 tmp31 = tl_math.exp(tmp30) tmp32 = tmp28 + tmp31 tmp33 = tmp18 - tmp25 tmp34 = tmp33 * tmp6 tmp35 = tl_math.exp(tmp34) tmp36 = tmp32 + tmp35 tmp37 = tmp24 - tmp25 tmp38 = tmp37 * tmp6 tmp39 = tl_math.exp(tmp38) tmp40 = tmp36 + tmp39 tmp42 = tl.where(tmp2, tmp4, tmp41) tmp43 = tmp42 * tmp6 tmp45 = tl.where(tmp9, tmp4, tmp44) tmp46 = tmp45 * tmp6 tmp47 = triton_helpers.maximum(tmp43, tmp46) tmp49 = tl.where(tmp15, tmp4, tmp48) tmp50 = tmp49 * tmp6 tmp51 = triton_helpers.maximum(tmp47, tmp50) tmp53 = tl.where(tmp21, tmp4, tmp52) tmp54 = tmp53 * tmp6 tmp55 = triton_helpers.maximum(tmp51, tmp54) tmp56 = tmp43 - tmp55 tmp57 = tmp56 * tmp6 tmp58 = tl_math.exp(tmp57) tmp59 = tmp46 - tmp55 tmp60 = tmp59 * tmp6 tmp61 = tl_math.exp(tmp60) tmp62 = tmp58 + tmp61 tmp63 = tmp50 - tmp55 tmp64 = tmp63 * tmp6 tmp65 = tl_math.exp(tmp64) tmp66 = tmp62 + tmp65 tmp67 = tmp54 - tmp55 tmp68 = tmp67 * tmp6 tmp69 = tl_math.exp(tmp68) tmp70 = tmp66 + tmp69 tmp72 = tl.where(tmp2, tmp4, tmp71) tmp73 = tmp72 * tmp6 tmp75 = tl.where(tmp9, tmp4, tmp74) tmp76 = tmp75 * tmp6 tmp77 = triton_helpers.maximum(tmp73, tmp76) tmp79 = tl.where(tmp15, tmp4, tmp78) tmp80 = tmp79 * tmp6 tmp81 = triton_helpers.maximum(tmp77, tmp80) tmp83 = tl.where(tmp21, tmp4, tmp82) tmp84 = tmp83 * tmp6 tmp85 = triton_helpers.maximum(tmp81, tmp84) tmp86 = tmp73 - tmp85 tmp87 = tmp86 * tmp6 tmp88 = tl_math.exp(tmp87) tmp89 = tmp76 - tmp85 tmp90 = tmp89 * tmp6 tmp91 = tl_math.exp(tmp90) tmp92 = tmp88 + tmp91 tmp93 = tmp80 - tmp85 tmp94 = tmp93 * tmp6 tmp95 = tl_math.exp(tmp94) tmp96 = tmp92 + tmp95 tmp97 = tmp84 - tmp85 tmp98 = tmp97 * tmp6 tmp99 = tl_math.exp(tmp98) tmp100 = tmp96 + tmp99 tmp102 = tl.where(tmp2, tmp4, tmp101) tmp103 = tmp102 * tmp6 tmp105 = tl.where(tmp9, tmp4, tmp104) tmp106 = tmp105 * tmp6 tmp107 = triton_helpers.maximum(tmp103, tmp106) tmp109 = tl.where(tmp15, tmp4, tmp108) tmp110 = tmp109 * tmp6 tmp111 = triton_helpers.maximum(tmp107, tmp110) tmp113 = tl.where(tmp21, tmp4, tmp112) tmp114 = tmp113 * tmp6 tmp115 = triton_helpers.maximum(tmp111, tmp114) tmp116 = tmp103 - tmp115 tmp117 = tmp116 * tmp6 tmp118 = tl_math.exp(tmp117) tmp119 = tmp106 - tmp115 tmp120 = tmp119 * tmp6 tmp121 = tl_math.exp(tmp120) tmp122 = tmp118 + tmp121 tmp123 = tmp110 - tmp115 tmp124 = tmp123 * tmp6 tmp125 = tl_math.exp(tmp124) tmp126 = tmp122 + tmp125 tmp127 = tmp114 - tmp115 tmp128 = tmp127 * tmp6 tmp129 = tl_math.exp(tmp128) tmp130 = tmp126 + tmp129 tl.store(out_ptr0 + (x2), tmp25, xmask) tl.store(out_ptr1 + (x2), tmp40, xmask) tl.store(out_ptr2 + (x2), tmp55, xmask) tl.store(out_ptr3 + (x2), tmp70, xmask) tl.store(out_ptr4 + (x2), tmp85, xmask) tl.store(out_ptr5 + (x2), tmp100, xmask) tl.store(out_ptr6 + (x2), tmp115, xmask) tl.store(out_ptr7 + (x2), tmp130, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/x5/cx5ojg5jm52l43opnjjljovkvoshvjbeeoave5wumdtpxrscwgc5.py # Topologically Sorted Source Nodes: [eq, masked_fill, interMAF_q2v, masked_fill_2, interMAF_q2v_1, masked_fill_4, interMAF_q2v_2, masked_fill_6, interMAF_q2v_3], Original ATen: [aten.eq, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # eq => eq # interMAF_q2v => div_2, exp # interMAF_q2v_1 => div_6, exp_2 # interMAF_q2v_2 => 
# kernel path: runs/run_shard_9/inductor_cache/x5/cx5ojg5jm52l43opnjjljovkvoshvjbeeoave5wumdtpxrscwgc5.py
# Topologically Sorted Source Nodes: [eq, masked_fill, interMAF_q2v, masked_fill_2, interMAF_q2v_1, masked_fill_4, interMAF_q2v_2, masked_fill_6, interMAF_q2v_3], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
#   eq => eq
#   interMAF_q2v => div_2, exp
#   interMAF_q2v_1 => div_6, exp_2
#   interMAF_q2v_2 => div_10, exp_4
#   interMAF_q2v_3 => div_14, exp_6
#   masked_fill => full_default, where
#   masked_fill_2 => where_2
#   masked_fill_4 => where_4
#   masked_fill_6 => where_6
# Graph fragment:
#   %eq : [num_users=32] = call_function[target=torch.ops.aten.eq.Scalar](args = (%expand_2, 0), kwargs = {})
#   %full_default : [num_users=64] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %bmm), kwargs = {})
#   %mul_tensor_63 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1), kwargs = {})
#   %amax_default_63 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_63, [2], True), kwargs = {})
#   %sub_tensor_63 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_63, %amax_default_63), kwargs = {})
#   %div_tensor_63 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_63, 1.0), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_63,), kwargs = {})
#   %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
#   %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %bmm_4), kwargs = {})
#   %mul_tensor_61 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_2, 1), kwargs = {})
#   %amax_default_61 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_61, [2], True), kwargs = {})
#   %sub_tensor_61 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_61, %amax_default_61), kwargs = {})
#   %div_tensor_61 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_61, 1.0), kwargs = {})
#   %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_61,), kwargs = {})
#   %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_2, %sum_3), kwargs = {})
#   %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %bmm_8), kwargs = {})
#   %mul_tensor_59 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_4, 1), kwargs = {})
#   %amax_default_59 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_59, [2], True), kwargs = {})
#   %sub_tensor_59 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_59, %amax_default_59), kwargs = {})
#   %div_tensor_59 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_59, 1.0), kwargs = {})
#   %exp_4 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_59,), kwargs = {})
#   %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_4, %sum_5), kwargs = {})
#   %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %bmm_12), kwargs = {})
#   %mul_tensor_57 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_6, 1), kwargs = {})
#   %amax_default_57 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_57, [2], True), kwargs = {})
#   %sub_tensor_57 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_57, %amax_default_57), kwargs = {})
#   %div_tensor_57 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_57, 1.0), kwargs = {})
#   %exp_6 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_57,), kwargs = {})
#   %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_6, %sum_7), kwargs = {})
triton_poi_fused__softmax_eq_masked_fill_11 = async_compile.triton('triton_poi_fused__softmax_eq_masked_fill_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_masked_fill_11', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_11(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = (xindex // 16)
    x3 = xindex
    x4 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp8 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_out_ptr1 + (x3), xmask)
    tmp17 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr4 + (x4), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_out_ptr2 + (x3), xmask)
    tmp26 = tl.load(in_ptr5 + (x4), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr6 + (x4), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_out_ptr3 + (x3), xmask)
    tmp35 = tl.load(in_ptr7 + (x4), xmask, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr8 + (x4), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tmp4 = -1000000000.0
    tmp5 = tl.where(tmp2, tmp4, tmp3)
    tmp6 = 1.0
    tmp7 = tmp5 * tmp6
    tmp9 = tmp7 - tmp8
    tmp10 = tmp9 * tmp6
    tmp11 = tl_math.exp(tmp10)
    tmp13 = tmp11 / tmp12
    tmp15 = tl.where(tmp2, tmp4, tmp14)
    tmp16 = tmp15 * tmp6
    tmp18 = tmp16 - tmp17
    tmp19 = tmp18 * tmp6
    tmp20 = tl_math.exp(tmp19)
    tmp22 = tmp20 / tmp21
    tmp24 = tl.where(tmp2, tmp4, tmp23)
    tmp25 = tmp24 * tmp6
    tmp27 = tmp25 - tmp26
    tmp28 = tmp27 * tmp6
    tmp29 = tl_math.exp(tmp28)
    tmp31 = tmp29 / tmp30
    tmp33 = tl.where(tmp2, tmp4, tmp32)
    tmp34 = tmp33 * tmp6
    tmp36 = tmp34 - tmp35
    tmp37 = tmp36 * tmp6
    tmp38 = tl_math.exp(tmp37)
    tmp40 = tmp38 / tmp39
    tl.store(in_out_ptr0 + (x3), tmp13, xmask)
    tl.store(in_out_ptr1 + (x3), tmp22, xmask)
    tl.store(in_out_ptr2 + (x3), tmp31, xmask)
    tl.store(in_out_ptr3 + (x3), tmp40, xmask)
''', device_str='cuda')
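# [Editor's note] Kernels _10 and _11 above split the masked softmax into two
# passes: _10 emits only the per-row statistics (row max and sum of
# exponentials) for four attention maps at once, and _11 then normalizes each
# score in place. A hedged sketch of that numerically stable decomposition
# (tensor name is illustrative):
def _reference_two_pass_softmax(x):
    row_max = x.max(dim=-1, keepdim=True).values  # pass 1: amax_default_*
    exp = torch.exp(x - row_max)                  # sub_tensor_* + exp
    row_sum = exp.sum(dim=-1, keepdim=True)       # pass 1: sum_*
    return exp / row_sum                          # pass 2: div_* (in place in _11)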
# kernel path: runs/run_shard_9/inductor_cache/tu/ctuj43cmwzwyra3jufljkm5ydcvewwdecauzoq5mpn7jxeilejui.py
# Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_1, interMAF_v2q, masked_fill_3, interMAF_v2q_1, masked_fill_5, interMAF_v2q_2], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
# Source node to ATen node mapping:
#   eq_1 => eq_1
#   interMAF_v2q => exp_1, sum_2
#   interMAF_v2q_1 => exp_3, sum_4
#   interMAF_v2q_2 => exp_5, sum_6
#   masked_fill => full_default
#   masked_fill_1 => where_1
#   masked_fill_3 => where_3
#   masked_fill_5 => where_5
# Graph fragment:
#   %full_default : [num_users=64] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %eq_1 : [num_users=32] = call_function[target=torch.ops.aten.eq.Scalar](args = (%expand_5, 0), kwargs = {})
#   %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default, %bmm_1), kwargs = {})
#   %mul_tensor_62 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_1, 1), kwargs = {})
#   %amax_default_62 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_62, [2], True), kwargs = {})
#   %sub_tensor_62 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_62, %amax_default_62), kwargs = {})
#   %div_tensor_62 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_62, 1.0), kwargs = {})
#   %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_62,), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [2], True), kwargs = {})
#   %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default, %bmm_5), kwargs = {})
#   %mul_tensor_60 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_3, 1), kwargs = {})
#   %amax_default_60 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_60, [2], True), kwargs = {})
#   %sub_tensor_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_60, %amax_default_60), kwargs = {})
#   %div_tensor_60 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_60, 1.0), kwargs = {})
#   %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_60,), kwargs = {})
#   %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [2], True), kwargs = {})
#   %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default, %bmm_9), kwargs = {})
#   %mul_tensor_58 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_5, 1), kwargs = {})
#   %amax_default_58 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_58, [2], True), kwargs = {})
#   %sub_tensor_58 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_58, %amax_default_58), kwargs = {})
#   %div_tensor_58 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_58, 1.0), kwargs = {})
#   %exp_5 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_58,), kwargs = {})
#   %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_5, [2], True), kwargs = {})
triton_poi_fused__softmax_eq_masked_fill_12 = async_compile.triton('triton_poi_fused__softmax_eq_masked_fill_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_masked_fill_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp41 = tl.load(in_ptr2 + (4*x2), xmask, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr2 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr2 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp52 = tl.load(in_ptr2 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp71 = tl.load(in_ptr3 + (4*x2), xmask, eviction_policy='evict_last')
    tmp74 = tl.load(in_ptr3 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp78 = tl.load(in_ptr3 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp82 = tl.load(in_ptr3 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tmp4 = -1000000000.0
    tmp5 = tl.where(tmp2, tmp4, tmp3)
    tmp6 = 1.0
    tmp7 = tmp5 * tmp6
    tmp9 = tmp8 == tmp1
    tmp11 = tl.where(tmp9, tmp4, tmp10)
    tmp12 = tmp11 * tmp6
    tmp13 = triton_helpers.maximum(tmp7, tmp12)
    tmp15 = tmp14 == tmp1
    tmp17 = tl.where(tmp15, tmp4, tmp16)
    tmp18 = tmp17 * tmp6
    tmp19 = triton_helpers.maximum(tmp13, tmp18)
    tmp21 = tmp20 == tmp1
    tmp23 = tl.where(tmp21, tmp4, tmp22)
    tmp24 = tmp23 * tmp6
    tmp25 = triton_helpers.maximum(tmp19, tmp24)
    tmp26 = tmp7 - tmp25
    tmp27 = tmp26 * tmp6
    tmp28 = tl_math.exp(tmp27)
    tmp29 = tmp12 - tmp25
    tmp30 = tmp29 * tmp6
    tmp31 = tl_math.exp(tmp30)
    tmp32 = tmp28 + tmp31
    tmp33 = tmp18 - tmp25
    tmp34 = tmp33 * tmp6
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp32 + tmp35
    tmp37 = tmp24 - tmp25
    tmp38 = tmp37 * tmp6
    tmp39 = tl_math.exp(tmp38)
    tmp40 = tmp36 + tmp39
    tmp42 = tl.where(tmp2, tmp4, tmp41)
    tmp43 = tmp42 * tmp6
    tmp45 = tl.where(tmp9, tmp4, tmp44)
    tmp46 = tmp45 * tmp6
    tmp47 = triton_helpers.maximum(tmp43, tmp46)
    tmp49 = tl.where(tmp15, tmp4, tmp48)
    tmp50 = tmp49 * tmp6
    tmp51 = triton_helpers.maximum(tmp47, tmp50)
    tmp53 = tl.where(tmp21, tmp4, tmp52)
    tmp54 = tmp53 * tmp6
    tmp55 = triton_helpers.maximum(tmp51, tmp54)
    tmp56 = tmp43 - tmp55
    tmp57 = tmp56 * tmp6
    tmp58 = tl_math.exp(tmp57)
    tmp59 = tmp46 - tmp55
    tmp60 = tmp59 * tmp6
    tmp61 = tl_math.exp(tmp60)
    tmp62 = tmp58 + tmp61
    tmp63 = tmp50 - tmp55
    tmp64 = tmp63 * tmp6
    tmp65 = tl_math.exp(tmp64)
    tmp66 = tmp62 + tmp65
    tmp67 = tmp54 - tmp55
    tmp68 = tmp67 * tmp6
    tmp69 = tl_math.exp(tmp68)
    tmp70 = tmp66 + tmp69
    tmp72 = tl.where(tmp2, tmp4, tmp71)
    tmp73 = tmp72 * tmp6
    tmp75 = tl.where(tmp9, tmp4, tmp74)
    tmp76 = tmp75 * tmp6
    tmp77 = triton_helpers.maximum(tmp73, tmp76)
    tmp79 = tl.where(tmp15, tmp4, tmp78)
    tmp80 = tmp79 * tmp6
    tmp81 = triton_helpers.maximum(tmp77, tmp80)
    tmp83 = tl.where(tmp21, tmp4, tmp82)
    tmp84 = tmp83 * tmp6
    tmp85 = triton_helpers.maximum(tmp81, tmp84)
    tmp86 = tmp73 - tmp85
    tmp87 = tmp86 * tmp6
    tmp88 = tl_math.exp(tmp87)
    tmp89 = tmp76 - tmp85
    tmp90 = tmp89 * tmp6
    tmp91 = tl_math.exp(tmp90)
    tmp92 = tmp88 + tmp91
    tmp93 = tmp80 - tmp85
    tmp94 = tmp93 * tmp6
    tmp95 = tl_math.exp(tmp94)
    tmp96 = tmp92 + tmp95
    tmp97 = tmp84 - tmp85
    tmp98 = tmp97 * tmp6
    tmp99 = tl_math.exp(tmp98)
    tmp100 = tmp96 + tmp99
    tl.store(out_ptr0 + (x2), tmp25, xmask)
    tl.store(out_ptr1 + (x2), tmp40, xmask)
    tl.store(out_ptr2 + (x2), tmp55, xmask)
    tl.store(out_ptr3 + (x2), tmp70, xmask)
    tl.store(out_ptr4 + (x2), tmp85, xmask)
    tl.store(out_ptr5 + (x2), tmp100, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/oq/coqh775v4e3sm7t6vxepeh6librdwwpbap27ocwafbe4vr6vf6vd.py
# Topologically Sorted Source Nodes: [v_update], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
#   v_update => bmm_2
# Graph fragment:
#   %bmm_2 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_6, %expand_7), kwargs = {})
triton_poi_fused_bmm_13 = async_compile.triton('triton_poi_fused_bmm_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_13(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (8 + (12*x0)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/mq/cmqxrfo34tx5are4k7czci3rrdilpgzbqdmc2ptzhle47gjg3cvl.py
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
#   matmul_6 => bmm_6
# Graph fragment:
#   %bmm_6 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_16, %expand_17), kwargs = {})
triton_poi_fused_bmm_14 = async_compile.triton('triton_poi_fused_bmm_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_14(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (9 + (12*x0)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/6u/c6usye5743zjm6svieqwmopokgox3dvihtowe47riybfxy3oag65.py
# Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
#   matmul_10 => bmm_10
# Graph fragment:
#   %bmm_10 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_26, %expand_27), kwargs = {})
triton_poi_fused_bmm_15 = async_compile.triton('triton_poi_fused_bmm_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_15(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (10 + (12*x0)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
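# [Editor's note] The small _bmm kernels above and below do no arithmetic:
# each copies one column of a packed, stride-12 projection (note the
# `8 + 12*x0`, `9 + 12*x0`, ... indexing) into a contiguous scratch buffer so
# that the subsequent extern bmm sees a dense operand. A hedged eager
# equivalent, assuming `packed` has shape (B, N, 12) (illustrative name):
def _reference_gather_column(packed, k):
    # e.g. k = 8 reproduces triton_poi_fused_bmm_13's load pattern
    return packed[:, :, k].contiguous()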
# kernel path: runs/run_shard_9/inductor_cache/ex/cexdpmlfp22tbjwn6sxyy2p36m52tblyucveudmkzuefibygz5z7.py
# Topologically Sorted Source Nodes: [matmul_14], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
#   matmul_14 => bmm_14
# Graph fragment:
#   %bmm_14 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_36, %expand_37), kwargs = {})
triton_poi_fused_bmm_16 = async_compile.triton('triton_poi_fused_bmm_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_16(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (11 + (12*x0)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/xn/cxndbepzjiwiwr6mjnpgdvd476pxwncssydiut5r4itzo7n7d6xi.py
# Topologically Sorted Source Nodes: [v_update_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   v_update_3 => cat_4
# Graph fragment:
#   %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_2, %bmm_14], 2), kwargs = {})
triton_poi_fused_cat_17 = async_compile.triton('triton_poi_fused_cat_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 3, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 2, tl.int64)
    tmp6 = tmp0 < tmp5
    tmp7 = tmp6 & tmp4
    tmp8 = tl.full([1], 1, tl.int64)
    tmp9 = tmp0 < tmp8
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + (x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp0 >= tmp8
    tmp13 = tmp12 & tmp7
    tmp14 = tl.load(in_ptr1 + (x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.where(tmp9, tmp11, tmp14)
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp7, tmp15, tmp16)
    tmp18 = tmp0 >= tmp5
    tmp19 = tmp18 & tmp4
    tmp20 = tl.load(in_ptr2 + (x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
    tmp21 = tl.where(tmp6, tmp17, tmp20)
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp4, tmp21, tmp22)
    tmp24 = tmp0 >= tmp3
    tmp25 = tl.full([1], 4, tl.int64)
    tmp26 = tmp0 < tmp25
    tmp27 = tl.load(in_ptr3 + (x1), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.where(tmp4, tmp23, tmp27)
    tl.store(out_ptr0 + (x0 + (8*x1)), tmp28, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/ce/ccexibaxlyfjz2rd4raowxat5ctmhrniqkkt4wkr7k3mnd743jgt.py
# Topologically Sorted Source Nodes: [cat_v], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat_v => cat_6
# Graph fragment:
#   %cat_6 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_1, %cat_4], 2), kwargs = {})
triton_poi_fused_cat_18 = async_compile.triton('triton_poi_fused_cat_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_18(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tl.store(out_ptr0 + (x0 + (8*x1)), tmp0, xmask)
''', device_str='cuda')
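# [Editor's note] triton_poi_fused_cat_17/_18 above implement torch.cat not by
# allocating intermediates but by writing each input directly into the right
# slice of the 8-wide output (note the `x0 + 8*x1` store index). A hedged
# eager sketch of the fused concatenations; the head count and shapes are
# inferred from the 4/8-wide indexing and are illustrative only:
def _reference_cat_v(v, heads):
    # heads: list of four (B, N, 1) per-head outputs; v: (B, N, 4)
    v_update = torch.cat(heads, dim=2)       # cat_2 / cat_4 in the graph
    return torch.cat([v, v_update], dim=2)   # cat_6 -> (B, N, 8)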
# kernel path: runs/run_shard_9/inductor_cache/k4/ck4ipofcd6dbfdwi3wfxxdk7xncbrfbbm2wgfcpazu4ckaokohan.py
# Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_7, interMAF_v2q_3, mul_2, sum_1, v_mean, relu_2], Original ATen: [aten.masked_fill, aten.eq, aten._softmax, aten.mul, aten.sum, aten.div, aten.relu]
# Source node to ATen node mapping:
#   eq_1 => eq_1
#   interMAF_v2q_3 => exp_7, sum_8
#   masked_fill => full_default
#   masked_fill_7 => where_7
#   mul_2 => mul_2
#   relu_2 => relu_2
#   sum_1 => sum_9
#   v_mean => div_16
# Graph fragment:
#   %full_default : [num_users=64] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %eq_1 : [num_users=32] = call_function[target=torch.ops.aten.eq.Scalar](args = (%expand_5, 0), kwargs = {})
#   %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default, %bmm_13), kwargs = {})
#   %mul_tensor_56 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_7, 1), kwargs = {})
#   %amax_default_56 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_56, [2], True), kwargs = {})
#   %sub_tensor_56 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_56, %amax_default_56), kwargs = {})
#   %div_tensor_56 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_56, 1.0), kwargs = {})
#   %exp_7 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_56,), kwargs = {})
#   %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_7, [2], True), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_57, %unsqueeze), kwargs = {})
#   %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {})
#   %div_16 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_9, %unsqueeze_11), kwargs = {})
#   %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%div_16,), kwargs = {})
triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19 = async_compile.triton('triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp41 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
    tmp42 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
    tmp45 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
    tmp49 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
    tmp53 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tmp4 = -1000000000.0
    tmp5 = tl.where(tmp2, tmp4, tmp3)
    tmp6 = 1.0
    tmp7 = tmp5 * tmp6
    tmp9 = tmp8 == tmp1
    tmp11 = tl.where(tmp9, tmp4, tmp10)
    tmp12 = tmp11 * tmp6
    tmp13 = triton_helpers.maximum(tmp7, tmp12)
    tmp15 = tmp14 == tmp1
    tmp17 = tl.where(tmp15, tmp4, tmp16)
    tmp18 = tmp17 * tmp6
    tmp19 = triton_helpers.maximum(tmp13, tmp18)
    tmp21 = tmp20 == tmp1
    tmp23 = tl.where(tmp21, tmp4, tmp22)
    tmp24 = tmp23 * tmp6
    tmp25 = triton_helpers.maximum(tmp19, tmp24)
    tmp26 = tmp7 - tmp25
    tmp27 = tmp26 * tmp6
    tmp28 = tl_math.exp(tmp27)
    tmp29 = tmp12 - tmp25
    tmp30 = tmp29 * tmp6
    tmp31 = tl_math.exp(tmp30)
    tmp32 = tmp28 + tmp31
    tmp33 = tmp18 - tmp25
    tmp34 = tmp33 * tmp6
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp32 + tmp35
    tmp37 = tmp24 - tmp25
    tmp38 = tmp37 * tmp6
    tmp39 = tl_math.exp(tmp38)
    tmp40 = tmp36 + tmp39
    tmp43 = tmp41 + tmp42
    tmp44 = tmp43 * tmp0
    tmp46 = tmp45 + tmp42
    tmp47 = tmp46 * tmp8
    tmp48 = tmp44 + tmp47
    tmp50 = tmp49 + tmp42
    tmp51 = tmp50 * tmp14
    tmp52 = tmp48 + tmp51
    tmp54 = tmp53 + tmp42
    tmp55 = tmp54 * tmp20
    tmp56 = tmp52 + tmp55
    tmp57 = tmp0 + tmp8
    tmp58 = tmp57 + tmp14
    tmp59 = tmp58 + tmp20
    tmp60 = tmp56 / tmp59
    tmp61 = tl.full([1], 0, tl.int32)
    tmp62 = triton_helpers.maximum(tmp61, tmp60)
    tl.store(out_ptr0 + (x2), tmp25, xmask)
    tl.store(out_ptr1 + (x2), tmp40, xmask)
    tl.store(in_out_ptr0 + (x2), tmp62, xmask)
''', device_str='cuda')
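# [Editor's note] The kernel above fuses the final softmax statistics with a
# mask-weighted mean pool and a ReLU (v_mean = sum(v * mask) / sum(mask)).
# A hedged eager sketch, with `v` of shape (B, N, D) and a 0/1 `mask` of
# shape (B, N) (names are illustrative):
def _reference_masked_mean_relu(v, mask):
    weighted = (v * mask.unsqueeze(-1)).sum(dim=1)   # mul_2 + sum_1
    count = mask.sum(dim=1, keepdim=True)            # the unsqueeze_11 denominator
    return torch.relu(weighted / count)              # div_16 + relu_2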
# kernel path: runs/run_shard_9/inductor_cache/kd/ckdiqhnyoi67hufca6tuuuy4obbhhpyzgayg6gvbh7yztdvhoqpb.py
# Topologically Sorted Source Nodes: [mul_3, sum_3, q_mean, relu_3], Original ATen: [aten.mul, aten.sum, aten.div, aten.relu]
# Source node to ATen node mapping:
#   mul_3 => mul_3
#   q_mean => div_17
#   relu_3 => relu_3
#   sum_3 => sum_11
# Graph fragment:
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_59, %unsqueeze_1), kwargs = {})
#   %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {})
#   %div_17 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_11, %unsqueeze_13), kwargs = {})
#   %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%div_17,), kwargs = {})
triton_poi_fused_div_mul_relu_sum_20 = async_compile.triton('triton_poi_fused_div_mul_relu_sum_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_relu_sum_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_relu_sum_20(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (4*x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
    tmp7 = tl.load(in_ptr2 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
    tmp12 = tl.load(in_ptr2 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
    tmp17 = tl.load(in_ptr2 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp5 + tmp1
    tmp8 = tmp6 * tmp7
    tmp9 = tmp4 + tmp8
    tmp11 = tmp10 + tmp1
    tmp13 = tmp11 * tmp12
    tmp14 = tmp9 + tmp13
    tmp16 = tmp15 + tmp1
    tmp18 = tmp16 * tmp17
    tmp19 = tmp14 + tmp18
    tmp20 = tmp3 + tmp7
    tmp21 = tmp20 + tmp12
    tmp22 = tmp21 + tmp17
    tmp23 = tmp19 / tmp22
    tmp24 = tl.full([1], 0, tl.int32)
    tmp25 = triton_helpers.maximum(tmp24, tmp23)
    tl.store(in_out_ptr0 + (x2), tmp25, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/h4/ch4ho3earbx75wctqmmpb5c5zltjzv6c6btnxrxn526rmxvpimps.py
# Topologically Sorted Source Nodes: [relu_4], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   relu_4 => relu_4
# Graph fragment:
#   %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_57,), kwargs = {})
#   %le_19 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_21 = async_compile.triton('triton_poi_fused_relu_threshold_backward_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_21(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr1 + (x2), tmp6, xmask)
''', device_str='cuda')
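# [Editor's note] triton_poi_fused_relu_threshold_backward_21 above saves both
# the ReLU activation and the boolean mask (`relu(x) <= 0`) that autograd's
# aten.threshold_backward later uses to zero gradients. A hedged eager sketch:
def _reference_relu_with_backward_mask(x, bias):
    y = torch.relu(x + bias)
    return y, y <= 0  # the i1 mask buffer written to out_ptr1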
# kernel path: runs/run_shard_9/inductor_cache/tq/ctqgwkwxg36wqnxn5g6aalxpua5om3ceriychppnprzgujtwr44b.py
# Topologically Sorted Source Nodes: [add, new_vq, new_vk], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
#   add => add
#   new_vk => mul_7
#   new_vq => mul_6
# Graph fragment:
#   %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_15, 1), kwargs = {})
#   %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %getitem_31), kwargs = {})
#   %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %getitem_30), kwargs = {})
triton_poi_fused_add_mul_22 = async_compile.triton('triton_poi_fused_add_mul_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_22(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = (xindex // 16)
    x3 = (xindex // 4)
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (4 + x0 + (12*x3)), xmask)
    tmp6 = tl.load(in_ptr1 + (x0 + (12*x3)), xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 * tmp4
    tmp7 = tmp3 * tmp6
    tl.store(out_ptr0 + (x4), tmp5, xmask)
    tl.store(out_ptr1 + (x4), tmp7, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/pa/cpav2uzgm3tjogwsiudbdun7n7g3vzu7w3ojupkipweuw5oframa.py
# Topologically Sorted Source Nodes: [matmul_16], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
#   matmul_16 => bmm_16
# Graph fragment:
#   %bmm_16 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_40, %expand_41), kwargs = {})
triton_poi_fused_bmm_23 = async_compile.triton('triton_poi_fused_bmm_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_23(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/d6/cd6dgn3m6x4om4kekys52phap73y5rfy5ly7re2f26w7hjwbbcj6.py
# Topologically Sorted Source Nodes: [matmul_20], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
#   matmul_20 => bmm_20
# Graph fragment:
#   %bmm_20 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_50, %expand_51), kwargs = {})
triton_poi_fused_bmm_24 = async_compile.triton('triton_poi_fused_bmm_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_24(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/dj/cdj5tr5rwy4iwctjkoyamufzbvwctubawxljot5ugjmsdvce22zd.py
# Topologically Sorted Source Nodes: [matmul_24], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
#   matmul_24 => bmm_24
# Graph fragment:
#   %bmm_24 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_60, %expand_61), kwargs = {})
triton_poi_fused_bmm_25 = async_compile.triton('triton_poi_fused_bmm_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_25(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_9/inductor_cache/p7/cp7sng53axoajjyppfvk7sbnvnszmt32774sysrobofbsqshq6bc.py
# Topologically Sorted Source Nodes: [matmul_28], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
#   matmul_28 => bmm_28
# Graph fragment:
#   %bmm_28 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_70, %expand_71), kwargs = {})
triton_poi_fused_bmm_26 = async_compile.triton('triton_poi_fused_bmm_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_26(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
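# [Editor's note] triton_poi_fused_add_mul_22 above applies a sigmoid gate of
# the form (sigmoid(g) + 1) to two 4-wide slices of a packed, stride-12
# projection (offsets 0:4 and 4:8 of each row). A hedged eager sketch; the
# slice assignment to new_vq/new_vk follows the getitem_31/getitem_30 mapping
# and the names are illustrative:
def _reference_gated_kq(gate, packed):
    scale = torch.sigmoid(gate) + 1.0    # %add = sigmoid + 1
    new_vk = scale * packed[..., 0:4]    # mul_7 (getitem_30, offset x0 + 12*x3)
    new_vq = scale * packed[..., 4:8]    # mul_6 (getitem_31, offset 4 + x0 + 12*x3)
    return new_vq, new_vk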
'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_27', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cat_27(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp29 = tl.load(in_ptr4 + (x2), xmask) tmp30 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.full([1], 1, tl.int64) tmp9 = tmp0 < tmp8 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp0 >= tmp8 tmp13 = tmp12 & tmp7 tmp14 = tl.load(in_ptr1 + (x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp11, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp7, tmp15, tmp16) tmp18 = tmp0 >= tmp5 tmp19 = tmp18 & tmp4 tmp20 = tl.load(in_ptr2 + (x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tl.where(tmp6, tmp17, tmp20) tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp4, tmp21, tmp22) tmp24 = tmp0 >= tmp3 tmp25 = tl.full([1], 4, tl.int64) tmp26 = tmp0 < tmp25 tmp27 = tl.load(in_ptr3 + (x1), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.where(tmp4, tmp23, tmp27) tmp31 = tmp29 + tmp30 tmp32 = tmp31 + tmp28 tl.store(in_out_ptr0 + (x2), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bc/cbco7lemgvcpj2suxmhmzoaeweuileme6nrhq7coqrp3zye3mjwc.py # Topologically Sorted Source Nodes: [v_update_15, add_10], Original ATen: [aten.cat, aten.add] # Source node to ATen node mapping: # add_10 => add_10 # v_update_15 => cat_26 # Graph fragment: # %cat_26 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_24, %bmm_62], 2), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_169, %cat_26), kwargs = {}) triton_poi_fused_add_cat_28 = async_compile.triton('triton_poi_fused_add_cat_28', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_28', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cat_28(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp29 = tl.load(in_out_ptr0 + (x2), xmask) tmp30 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.full([1], 1, tl.int64) tmp9 = tmp0 < tmp8 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp0 >= tmp8 tmp13 = tmp12 & tmp7 tmp14 = tl.load(in_ptr1 + (x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp11, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp7, tmp15, tmp16) tmp18 = tmp0 >= tmp5 tmp19 = tmp18 & tmp4 tmp20 = tl.load(in_ptr2 + (x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tl.where(tmp6, tmp17, tmp20) tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp4, tmp21, tmp22) tmp24 = tmp0 >= tmp3 tmp25 = tl.full([1], 4, tl.int64) tmp26 = tmp0 < tmp25 tmp27 = tl.load(in_ptr3 + (x1), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.where(tmp4, tmp23, tmp27) tmp31 = tmp29 + tmp30 tmp32 = tmp31 + tmp28 tl.store(in_out_ptr0 + (x2), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (12, 4), (4, 1)) assert_size_stride(primals_10, (12, ), (1, )) assert_size_stride(primals_11, (12, 4), (4, 1)) assert_size_stride(primals_12, 
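# --- Editorial note -------------------------------------------------------
# The two fused add+cat kernels above build a width-4 concatenation of four
# width-1 head outputs and add it to a biased linear output in one pass. The
# sketch below is a minimal eager-mode reference for that pattern; the
# function and argument names are illustrative (they do not appear in the
# captured graph) and nothing in `call` below invokes it.
def _reference_add_cat(linear_out, bias, heads):
    # linear_out: (B, N, H) matmul result; bias: (H,) bias the fused kernel
    # folds in; heads: list of four (B, N, 1) per-head bmm outputs.
    return (linear_out + bias) + torch.cat(heads, dim=2)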
def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, ), (1, ))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (12, 4), (4, 1))
    assert_size_stride(primals_10, (12, ), (1, ))
    assert_size_stride(primals_11, (12, 4), (4, 1))
    assert_size_stride(primals_12, (12, ), (1, ))
    assert_size_stride(primals_13, (4, 8), (8, 1))
    assert_size_stride(primals_14, (4, ), (1, ))
    assert_size_stride(primals_15, (4, 8), (8, 1))
    assert_size_stride(primals_16, (4, ), (1, ))
    assert_size_stride(primals_17, (4, 4), (4, 1))
    assert_size_stride(primals_18, (4, ), (1, ))
    assert_size_stride(primals_19, (4, 4), (4, 1))
    assert_size_stride(primals_20, (4, ), (1, ))
    assert_size_stride(primals_21, (12, 4), (4, 1))
    assert_size_stride(primals_22, (12, ), (1, ))
    assert_size_stride(primals_23, (12, 4), (4, 1))
    assert_size_stride(primals_24, (12, ), (1, ))
    assert_size_stride(primals_25, (4, 4), (4, 1))
    assert_size_stride(primals_26, (4, ), (1, ))
    assert_size_stride(primals_27, (4, 4), (4, 1))
    assert_size_stride(primals_28, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [q], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_6, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_5
        del primals_6
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf673 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf0, buf2, buf673, 64, grid=grid(64), stream=stream0)
        buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf672 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf1, buf4, buf672, 64, grid=grid(64), stream=stream0)
        buf5 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 12), (1, 4), 0), out=buf5)
        buf6 = reinterpret_tensor(buf3, (4, 4, 12), (48, 12, 1), 0); del buf3 # reuse
        # Topologically Sorted Source Nodes: [v_trans_1], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf6, primals_10, primals_7, 192, grid=grid(192), stream=stream0)
        buf7 = reinterpret_tensor(buf5, (4, 4, 12), (48, 12, 1), 0); del buf5 # reuse
        # Topologically Sorted Source Nodes: [q_trans_1], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf7, primals_12, primals_8, 192, grid=grid(192), stream=stream0)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_2.run(buf6, buf8, 16, grid=grid(16), stream=stream0)
        buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_3.run(buf7, buf9, 16, grid=grid(16), stream=stream0)
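        # --- Editorial note -------------------------------------------------
        # triton_poi_fused_bmm_2/_3 above (and the _4.._9, _13.._16 and
        # _23.._26 variants used later) do not multiply anything themselves:
        # each copies one width-1 head slice of the (4, 4, 12) projection into
        # a contiguous scratch buffer so extern_kernels.bmm (cuBLAS) can
        # consume it. A rough eager-mode equivalent, with illustrative names
        # only (never called here):
        def _illustrative_head_slice(x, k):
            # k-th width-1 slice along the channel dim, made contiguous.
            return x[:, :, k:k + 1].contiguous()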
        buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf8, buf9, out=buf10)
        buf11 = reinterpret_tensor(buf9, (4, 4, 1), (4, 1, 16), 0); del buf9 # reuse
        # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_2.run(buf7, buf11, 16, grid=grid(16), stream=stream0)
        buf12 = reinterpret_tensor(buf8, (4, 1, 4), (4, 16, 1), 0); del buf8 # reuse
        # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_3.run(buf6, buf12, 16, grid=grid(16), stream=stream0)
        buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf11, buf12, out=buf13)
        buf24 = reinterpret_tensor(buf12, (4, 4, 1), (4, 1, 16), 0); del buf12 # reuse
        # Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_4.run(buf6, buf24, 16, grid=grid(16), stream=stream0)
        buf25 = reinterpret_tensor(buf11, (4, 1, 4), (4, 16, 1), 0); del buf11 # reuse
        # Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_5.run(buf7, buf25, 16, grid=grid(16), stream=stream0)
        buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf24, buf25, out=buf26)
        buf40 = reinterpret_tensor(buf25, (4, 4, 1), (4, 1, 16), 0); del buf25 # reuse
        # Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_6.run(buf6, buf40, 16, grid=grid(16), stream=stream0)
        buf41 = reinterpret_tensor(buf24, (4, 1, 4), (4, 16, 1), 0); del buf24 # reuse
        # Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_7.run(buf7, buf41, 16, grid=grid(16), stream=stream0)
        buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf40, buf41, out=buf42)
        buf56 = reinterpret_tensor(buf41, (4, 4, 1), (4, 1, 16), 0); del buf41 # reuse
        # Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_8.run(buf6, buf56, 16, grid=grid(16), stream=stream0)
        buf57 = reinterpret_tensor(buf40, (4, 1, 4), (4, 16, 1), 0); del buf40 # reuse
        # Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_9.run(buf7, buf57, 16, grid=grid(16), stream=stream0)
        buf58 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf56, buf57, out=buf58)
        buf14 = reinterpret_tensor(buf57, (4, 4, 1), (4, 1, 16), 0); del buf57 # reuse
        buf15 = buf56; del buf56 # reuse
        buf30 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf31 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf46 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf47 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf62 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf63 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        # Topologically Sorted Source Nodes: [eq, masked_fill, interMAF_q2v, masked_fill_2, interMAF_q2v_1, masked_fill_4, interMAF_q2v_2, masked_fill_6, interMAF_q2v_3], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_8, buf10, buf26, buf42, buf58, buf14, buf15, buf30, buf31, buf46, buf47, buf62, buf63, 16, grid=grid(16), stream=stream0)
        buf16 = buf10; del buf10 # reuse
        buf32 = buf26; del buf26 # reuse
        buf48 = buf42; del buf42 # reuse
        buf64 = buf58; del buf58 # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, interMAF_q2v, masked_fill_2, interMAF_q2v_1, masked_fill_4, interMAF_q2v_2, masked_fill_6, interMAF_q2v_3], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf16, buf32, buf48, buf64, primals_8, buf14, buf15, buf30, buf31, buf46, buf47, buf62, buf63, 64, grid=grid(64), stream=stream0)
        buf27 = buf63; del buf63 # reuse
        # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_4.run(buf7, buf27, 16, grid=grid(16), stream=stream0)
        buf28 = reinterpret_tensor(buf62, (4, 1, 4), (4, 16, 1), 0); del buf62 # reuse
        # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_5.run(buf6, buf28, 16, grid=grid(16), stream=stream0)
        buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf27, buf28, out=buf29)
        buf43 = reinterpret_tensor(buf28, (4, 4, 1), (4, 1, 16), 0); del buf28 # reuse
        # Topologically Sorted Source Nodes: [matmul_9], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_6.run(buf7, buf43, 16, grid=grid(16), stream=stream0)
        buf44 = reinterpret_tensor(buf27, (4, 1, 4), (4, 16, 1), 0); del buf27 # reuse
        # Topologically Sorted Source Nodes: [matmul_9], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_7.run(buf6, buf44, 16, grid=grid(16), stream=stream0)
        buf45 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_9], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf43, buf44, out=buf45)
        buf17 = reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 16), 0); del buf44 # reuse
        buf18 = buf43; del buf43 # reuse
        buf33 = buf47; del buf47 # reuse
        buf34 = buf46; del buf46 # reuse
        buf49 = buf31; del buf31 # reuse
        buf50 = buf30; del buf30 # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_1, interMAF_v2q, masked_fill_3, interMAF_v2q_1, masked_fill_5, interMAF_v2q_2], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_12.run(primals_7, buf13, buf29, buf45, buf17, buf18, buf33, buf34, buf49, buf50, 16, grid=grid(16), stream=stream0)
        buf59 = buf15; del buf15 # reuse
        # Topologically Sorted Source Nodes: [matmul_13], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_8.run(buf7, buf59, 16, grid=grid(16), stream=stream0)
        buf60 = reinterpret_tensor(buf14, (4, 1, 4), (4, 16, 1), 0); del buf14 # reuse
        # Topologically Sorted Source Nodes: [matmul_13], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_9.run(buf6, buf60, 16, grid=grid(16), stream=stream0)
        buf61 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_13], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf59, buf60, out=buf61)
        buf20 = reinterpret_tensor(buf60, (4, 4, 1), (4, 1, 16), 0); del buf60 # reuse
        # Topologically Sorted Source Nodes: [v_update], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf7, buf20, 16, grid=grid(16), stream=stream0)
        buf21 = reinterpret_tensor(buf59, (4, 4, 1), (4, 1, 1), 0); del buf59 # reuse
        # Topologically Sorted Source Nodes: [v_update], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf16, buf20, out=buf21)
        buf36 = buf20; del buf20 # reuse
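        # --- Editorial note -------------------------------------------------
        # The _softmax_eq_masked_fill_10/_11 pair above is a two-pass fused
        # masked softmax: pass 10 computes the per-row running max and exp-sum
        # with masked positions excluded, pass 11 normalizes in place. An
        # eager-mode equivalent (names illustrative; -inf stands in for the
        # fusion's large negative fill constant; never called here):
        def _illustrative_masked_softmax(scores, mask):
            return torch.softmax(scores.masked_fill(mask == 0, float('-inf')), dim=-1)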
        # Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf7, buf36, 16, grid=grid(16), stream=stream0)
        buf37 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf32, buf36, out=buf37)
        buf52 = buf36; del buf36 # reuse
        # Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf7, buf52, 16, grid=grid(16), stream=stream0)
        buf53 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf48, buf52, out=buf53)
        buf68 = buf52; del buf52 # reuse
        # Topologically Sorted Source Nodes: [matmul_14], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf7, buf68, 16, grid=grid(16), stream=stream0)
        buf69 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_14], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf64, buf68, out=buf69)
        buf75 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        buf70 = reinterpret_tensor(buf75, (4, 4, 4), (32, 8, 1), 4) # alias
        # Topologically Sorted Source Nodes: [v_update_3], Original ATen: [aten.cat]
        triton_poi_fused_cat_17.run(buf21, buf37, buf53, buf69, buf70, 64, grid=grid(64), stream=stream0)
        buf74 = reinterpret_tensor(buf75, (4, 4, 4), (32, 8, 1), 0) # alias
        # Topologically Sorted Source Nodes: [cat_v], Original ATen: [aten.cat]
        triton_poi_fused_cat_18.run(buf0, buf74, 64, grid=grid(64), stream=stream0)
        buf78 = buf0; del buf0 # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf75, (16, 8), (8, 1), 0), reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), out=buf78)
        buf65 = reinterpret_tensor(buf69, (4, 4, 1), (4, 1, 16), 0); del buf69 # reuse
        buf66 = reinterpret_tensor(buf53, (4, 4, 1), (4, 1, 16), 0); del buf53 # reuse
        buf80 = reinterpret_tensor(buf37, (4, 4), (4, 1), 0); del buf37 # reuse
        buf82 = buf80; del buf80 # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_7, interMAF_v2q_3, mul_2, sum_1, v_mean, relu_2], Original ATen: [aten.masked_fill, aten.eq, aten._softmax, aten.mul, aten.sum, aten.div, aten.relu]
        triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19.run(buf82, primals_7, buf61, buf78, primals_14, buf65, buf66, 16, grid=grid(16), stream=stream0)
        buf19 = buf13; del buf13 # reuse
        buf35 = buf29; del buf29 # reuse
        buf51 = buf45; del buf45 # reuse
        buf67 = buf61; del buf61 # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_1, interMAF_v2q, masked_fill_3, interMAF_v2q_1, masked_fill_5, interMAF_v2q_2, masked_fill_7, interMAF_v2q_3], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf19, buf35, buf51, buf67, primals_7, buf17, buf18, buf33, buf34, buf49, buf50, buf65, buf66, 64, grid=grid(64), stream=stream0)
        buf22 = buf66; del buf66 # reuse
        # Topologically Sorted Source Nodes: [q_update], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf6, buf22, 16, grid=grid(16), stream=stream0)
        buf23 = reinterpret_tensor(buf65, (4, 4, 1), (4, 1, 1), 0); del buf65 # reuse
        # Topologically Sorted Source Nodes: [q_update], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf19, buf22, out=buf23)
        buf38 = buf22; del buf22 # reuse
        # Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf6, buf38, 16, grid=grid(16), stream=stream0)
        buf39 = reinterpret_tensor(buf50, (4, 4, 1), (4, 1, 1), 0); del buf50 # reuse
        # Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf35, buf38, out=buf39)
        buf54 = buf38; del buf38 # reuse
        # Topologically Sorted Source Nodes: [matmul_11], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf6, buf54, 16, grid=grid(16), stream=stream0)
        buf55 = reinterpret_tensor(buf49, (4, 4, 1), (4, 1, 1), 0); del buf49 # reuse
        # Topologically Sorted Source Nodes: [matmul_11], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf51, buf54, out=buf55)
        buf71 = buf54; del buf54 # reuse
        # Topologically Sorted Source Nodes: [matmul_15], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf6, buf71, 16, grid=grid(16), stream=stream0)
        buf72 = reinterpret_tensor(buf34, (4, 4, 1), (4, 1, 1), 0); del buf34 # reuse
        # Topologically Sorted Source Nodes: [matmul_15], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf67, buf71, out=buf72)
        buf77 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        buf73 = reinterpret_tensor(buf77, (4, 4, 4), (32, 8, 1), 4) # alias
        # Topologically Sorted Source Nodes: [q_update_3], Original ATen: [aten.cat]
        triton_poi_fused_cat_17.run(buf23, buf39, buf55, buf72, buf73, 64, grid=grid(64), stream=stream0)
        buf76 = reinterpret_tensor(buf77, (4, 4, 4), (32, 8, 1), 0) # alias
        # Topologically Sorted Source Nodes: [cat_q], Original ATen: [aten.cat]
        triton_poi_fused_cat_18.run(buf1, buf76, 64, grid=grid(64), stream=stream0)
        buf79 = buf1; del buf1 # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf77, (16, 8), (8, 1), 0), reinterpret_tensor(primals_15, (8, 4), (1, 8), 0), out=buf79)
        buf81 = reinterpret_tensor(buf72, (4, 4), (4, 1), 0); del buf72 # reuse
        buf84 = buf81; del buf81 # reuse
        # Topologically Sorted Source Nodes: [mul_3, sum_3, q_mean, relu_3], Original ATen: [aten.mul, aten.sum, aten.div, aten.relu]
        triton_poi_fused_div_mul_relu_sum_20.run(buf84, buf79, primals_16, primals_8, 16, grid=grid(16), stream=stream0)
        buf83 = reinterpret_tensor(buf55, (4, 4), (4, 1), 0); del buf55 # reuse
        # Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_18, buf82, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf83)
        buf85 = reinterpret_tensor(buf39, (4, 4), (4, 1), 0); del buf39 # reuse
        # Topologically Sorted Source Nodes: [linear_7], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_20, buf84, reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf85)
        buf86 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf671 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_4], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_21.run(buf78, primals_14, buf86, buf671, 64, grid=grid(64), stream=stream0)
        buf87 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf86, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 12), (1, 4), 0), out=buf87)
        buf88 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf670 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_5], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_21.run(buf79, primals_16, buf88, buf670, 64, grid=grid(64), stream=stream0)
        buf89 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf88, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf89)
        buf90 = reinterpret_tensor(buf87, (4, 4, 12), (48, 12, 1), 0); del buf87 # reuse
        # Topologically Sorted Source Nodes: [v_trans_3], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf90, primals_22, primals_7, 192, grid=grid(192), stream=stream0)
        buf91 = reinterpret_tensor(buf89, (4, 4, 12), (48, 12, 1), 0); del buf89 # reuse
        # Topologically Sorted Source Nodes: [q_trans_3], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf91, primals_24, primals_8, 192, grid=grid(192), stream=stream0)
        buf92 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf93 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add, new_vq, new_vk], Original ATen: [aten.add, aten.mul]
        triton_poi_fused_add_mul_22.run(buf85, buf90, buf92, buf93, 64, grid=grid(64), stream=stream0)
        buf94 = reinterpret_tensor(buf23, (4, 4, 1), (4, 1, 16), 0); del buf23 # reuse
        # Topologically Sorted Source Nodes: [matmul_16], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf92, buf94, 16, grid=grid(16), stream=stream0)
        buf95 = reinterpret_tensor(buf71, (4, 1, 4), (4, 16, 1), 0); del buf71 # reuse
        # Topologically Sorted Source Nodes: [matmul_16], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf93, buf95, 16, grid=grid(16), stream=stream0)
        buf96 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_16], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf94, buf95, out=buf96)
        buf97 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf98 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add_2, new_qq, new_qk], Original ATen: [aten.add, aten.mul]
        triton_poi_fused_add_mul_22.run(buf83, buf91, buf97, buf98, 64, grid=grid(64), stream=stream0)
        buf99 = reinterpret_tensor(buf95, (4, 4, 1), (4, 1, 16), 0); del buf95 # reuse
        # Topologically Sorted Source Nodes: [matmul_17], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf97, buf99, 16, grid=grid(16), stream=stream0)
        buf100 = reinterpret_tensor(buf94, (4, 1, 4), (4, 16, 1), 0); del buf94 # reuse
        # Topologically Sorted Source Nodes: [matmul_17], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf98, buf100, 16, grid=grid(16), stream=stream0)
        buf101 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_17], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf99, buf100, out=buf101)
        buf112 = buf99; del buf99 # reuse
        # Topologically Sorted Source Nodes: [matmul_20], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf92, buf112, 16, grid=grid(16), stream=stream0)
        buf113 = buf100; del buf100 # reuse
        # Topologically Sorted Source Nodes: [matmul_20], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf93, buf113, 16, grid=grid(16), stream=stream0)
        buf114 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_20], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf112, buf113, out=buf114)
        buf128 = reinterpret_tensor(buf113, (4, 4, 1), (4, 1, 16), 0); del buf113 # reuse
        # Topologically Sorted Source Nodes: [matmul_24], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf92, buf128, 16, grid=grid(16), stream=stream0)
        buf129 = reinterpret_tensor(buf112, (4, 1, 4), (4, 16, 1), 0); del buf112 # reuse
        # Topologically Sorted Source Nodes: [matmul_24], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf93, buf129, 16, grid=grid(16), stream=stream0)
        buf130 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_24], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf128, buf129, out=buf130)
        buf144 = reinterpret_tensor(buf129, (4, 4, 1), (4, 1, 16), 0); del buf129 # reuse
        # Topologically Sorted Source Nodes: [matmul_28], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf92, buf144, 16, grid=grid(16), stream=stream0)
        buf145 = reinterpret_tensor(buf128, (4, 1, 4), (4, 16, 1), 0); del buf128 # reuse
        # Topologically Sorted Source Nodes: [matmul_28], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf93, buf145, 16, grid=grid(16), stream=stream0)
        buf146 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_28], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf144, buf145, out=buf146)
        buf102 = reinterpret_tensor(buf145, (4, 4, 1), (4, 1, 16), 0); del buf145 # reuse
        buf103 = buf144; del buf144 # reuse
        buf118 = buf33; del buf33 # reuse
        buf119 = buf18; del buf18 # reuse
        buf134 = buf17; del buf17 # reuse
        buf135 = reinterpret_tensor(buf21, (4, 4, 1), (4, 1, 16), 0); del buf21 # reuse
        buf150 = buf68; del buf68 # reuse
        buf151 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_8, dyIntraMAF_v2v, masked_fill_10, dyIntraMAF_v2v_1, masked_fill_12, dyIntraMAF_v2v_2, masked_fill_14, dyIntraMAF_v2v_3], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_7, buf96, buf114, buf130, buf146, buf102, buf103, buf118, buf119, buf134, buf135, buf150, buf151, 16, grid=grid(16), stream=stream0)
        buf104 = buf96; del buf96 # reuse
        buf120 = buf114; del buf114 # reuse
        buf136 = buf130; del buf130 # reuse
        buf152 = buf146; del buf146 # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_8, dyIntraMAF_v2v, masked_fill_10, dyIntraMAF_v2v_1, masked_fill_12, dyIntraMAF_v2v_2, masked_fill_14, dyIntraMAF_v2v_3], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf104, buf120, buf136, buf152, primals_7, buf102, buf103, buf118, buf119, buf134, buf135, buf150, buf151, 64, grid=grid(64), stream=stream0)
        buf115 = buf151; del buf151 # reuse
        # Topologically Sorted Source Nodes: [matmul_21], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf97, buf115, 16, grid=grid(16), stream=stream0)
        buf116 = reinterpret_tensor(buf150, (4, 1, 4), (4, 16, 1), 0); del buf150 # reuse
        # Topologically Sorted Source Nodes: [matmul_21], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf98, buf116, 16, grid=grid(16), stream=stream0)
        buf117 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_21], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf115, buf116, out=buf117)
        buf131 = reinterpret_tensor(buf116, (4, 4, 1), (4, 1, 16), 0); del buf116 # reuse
        # Topologically Sorted Source Nodes: [matmul_25], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf97, buf131, 16, grid=grid(16), stream=stream0)
        buf132 = reinterpret_tensor(buf115, (4, 1, 4), (4, 16, 1), 0); del buf115 # reuse
        # Topologically Sorted Source Nodes: [matmul_25], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf98, buf132, 16, grid=grid(16), stream=stream0)
        buf133 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_25], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf131, buf132, out=buf133)
        buf147 = reinterpret_tensor(buf132, (4, 4, 1), (4, 1, 16), 0); del buf132 # reuse
        # Topologically Sorted Source Nodes: [matmul_29], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf97, buf147, 16, grid=grid(16), stream=stream0)
        buf148 = reinterpret_tensor(buf131, (4, 1, 4), (4, 16, 1), 0); del buf131 # reuse
        # Topologically Sorted Source Nodes: [matmul_29], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf98, buf148, 16, grid=grid(16), stream=stream0)
        buf149 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_29], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf147, buf148, out=buf149)
        buf105 = reinterpret_tensor(buf148, (4, 4, 1), (4, 1, 16), 0); del buf148 # reuse
        buf106 = buf147; del buf147 # reuse
        buf121 = buf135; del buf135 # reuse
        buf122 = buf134; del buf134 # reuse
        buf137 = buf119; del buf119 # reuse
        buf138 = buf118; del buf118 # reuse
        buf153 = buf103; del buf103 # reuse
        buf154 = buf102; del buf102 # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_9, dyIntraMAF_q2q, masked_fill_11, dyIntraMAF_q2q_1, masked_fill_13, dyIntraMAF_q2q_2, masked_fill_15, dyIntraMAF_q2q_3], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_8, buf101, buf117, buf133, buf149, buf105, buf106, buf121, buf122, buf137, buf138, buf153, buf154, 16, grid=grid(16), stream=stream0)
        buf107 = buf101; del buf101 # reuse
        buf123 = buf117; del buf117 # reuse
        buf139 = buf133; del buf133 # reuse
        buf155 = buf149; del buf149 # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_9, dyIntraMAF_q2q, masked_fill_11, dyIntraMAF_q2q_1, masked_fill_13, dyIntraMAF_q2q_2, masked_fill_15, dyIntraMAF_q2q_3], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf107, buf123, buf139, buf155, primals_8, buf105, buf106, buf121, buf122, buf137, buf138, buf153, buf154, 64, grid=grid(64), stream=stream0)
        buf108 = buf154; del buf154 # reuse
        # Topologically Sorted Source Nodes: [v_update_4], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf90, buf108, 16, grid=grid(16), stream=stream0)
        buf109 = reinterpret_tensor(buf153, (4, 4, 1), (4, 1, 1), 0); del buf153 # reuse
        # Topologically Sorted Source Nodes: [v_update_4], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf104, buf108, out=buf109)
        buf110 = buf108; del buf108 # reuse
        # Topologically Sorted Source Nodes: [q_update_4], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf91, buf110, 16, grid=grid(16), stream=stream0)
        buf111 = reinterpret_tensor(buf138, (4, 4, 1), (4, 1, 1), 0); del buf138 # reuse
        # Topologically Sorted Source Nodes: [q_update_4], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf107, buf110, out=buf111)
        buf124 = buf110; del buf110 # reuse
        # Topologically Sorted Source Nodes: [matmul_22], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf90, buf124, 16, grid=grid(16), stream=stream0)
        buf125 = reinterpret_tensor(buf137, (4, 4, 1), (4, 1, 1), 0); del buf137 # reuse
        # Topologically Sorted Source Nodes: [matmul_22], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf120, buf124, out=buf125)
        buf126 = buf124; del buf124 # reuse
        # Topologically Sorted Source Nodes: [matmul_23], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf91, buf126, 16, grid=grid(16), stream=stream0)
        buf127 = reinterpret_tensor(buf122, (4, 4, 1), (4, 1, 1), 0); del buf122 # reuse
        # Topologically Sorted Source Nodes: [matmul_23], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf123, buf126, out=buf127)
        buf140 = buf126; del buf126 # reuse
        # Topologically Sorted Source Nodes: [matmul_26], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf90, buf140, 16, grid=grid(16), stream=stream0)
        buf141 = reinterpret_tensor(buf121, (4, 4, 1), (4, 1, 1), 0); del buf121 # reuse
        # Topologically Sorted Source Nodes: [matmul_26], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf136, buf140, out=buf141)
        buf142 = buf140; del buf140 # reuse
        # Topologically Sorted Source Nodes: [matmul_27], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf91, buf142, 16, grid=grid(16), stream=stream0)
        buf143 = reinterpret_tensor(buf106, (4, 4, 1), (4, 1, 1), 0); del buf106 # reuse
        # Topologically Sorted Source Nodes: [matmul_27], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf139, buf142, out=buf143)
        buf156 = buf142; del buf142 # reuse
        # Topologically Sorted Source Nodes: [matmul_30], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf90, buf156, 16, grid=grid(16), stream=stream0)
        buf157 = reinterpret_tensor(buf105, (4, 4, 1), (4, 1, 1), 0); del buf105 # reuse
        # Topologically Sorted Source Nodes: [matmul_30], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf152, buf156, out=buf157)
        buf158 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf162 = buf158; del buf158 # reuse
        # Topologically Sorted Source Nodes: [v_update_7, add_4], Original ATen: [aten.cat, aten.add]
        triton_poi_fused_add_cat_27.run(buf162, buf109, buf125, buf141, buf157, buf78, primals_14, 64, grid=grid(64), stream=stream0)
        buf159 = reinterpret_tensor(buf157, (4, 4, 1), (4, 1, 16), 0); del buf157 # reuse
        # Topologically Sorted Source Nodes: [matmul_31], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf91, buf159, 16, grid=grid(16), stream=stream0)
        buf160 = buf141; del buf141 # reuse
        # Topologically Sorted Source Nodes: [matmul_31], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf155, buf159, out=buf160)
        buf161 = reinterpret_tensor(buf78, (4, 4, 4), (16, 4, 1), 0); del buf78 # reuse
        buf164 = buf161; del buf161 # reuse
        # Topologically Sorted Source Nodes: [q_update_7, add_5], Original ATen: [aten.cat, aten.add]
        triton_poi_fused_add_cat_27.run(buf164, buf111, buf127, buf143, buf160, buf79, primals_16, 64, grid=grid(64), stream=stream0)
        buf163 = buf79; del buf79 # reuse
        # Topologically Sorted Source Nodes: [updated_v_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_26, reinterpret_tensor(buf162, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf163)
        buf165 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [updated_q_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_28, reinterpret_tensor(buf164, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf165)
        buf166 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf669 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_6], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf163, buf166, buf669, 64, grid=grid(64), stream=stream0)
        buf167 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
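        # --- Editorial note -------------------------------------------------
        # The pervasive `reinterpret_tensor(bufN, shape, stride, offset);
        # del bufN # reuse` idiom is Inductor's memory planning: a dead
        # buffer's storage is re-viewed under a new shape/stride instead of
        # allocating fresh memory. An eager-mode analogue (illustrative only,
        # never called here):
        def _illustrative_reuse(dead, shape, stride):
            # as_strided re-views the existing storage without copying.
            return torch.as_strided(dead, shape, stride)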
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf166, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf167)
        buf168 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf668 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_7], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf165, buf168, buf668, 64, grid=grid(64), stream=stream0)
        buf169 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf168, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 12), (1, 4), 0), out=buf169)
        buf170 = reinterpret_tensor(buf167, (4, 4, 12), (48, 12, 1), 0); del buf167 # reuse
        # Topologically Sorted Source Nodes: [v_trans_5], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf170, primals_10, primals_7, 192, grid=grid(192), stream=stream0)
        buf171 = reinterpret_tensor(buf169, (4, 4, 12), (48, 12, 1), 0); del buf169 # reuse
        # Topologically Sorted Source Nodes: [q_trans_5], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf171, primals_12, primals_8, 192, grid=grid(192), stream=stream0)
        buf172 = reinterpret_tensor(buf160, (4, 4, 1), (4, 1, 16), 0); del buf160 # reuse
        # Topologically Sorted Source Nodes: [matmul_32], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_2.run(buf170, buf172, 16, grid=grid(16), stream=stream0)
        buf173 = reinterpret_tensor(buf143, (4, 1, 4), (4, 16, 1), 0); del buf143 # reuse
        # Topologically Sorted Source Nodes: [matmul_32], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_3.run(buf171, buf173, 16, grid=grid(16), stream=stream0)
        buf174 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_32], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf172, buf173, out=buf174)
        buf175 = reinterpret_tensor(buf173, (4, 4, 1), (4, 1, 16), 0); del buf173 # reuse
        # Topologically Sorted Source Nodes: [matmul_33], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_2.run(buf171, buf175, 16, grid=grid(16), stream=stream0)
        buf176 = reinterpret_tensor(buf172, (4, 1, 4), (4, 16, 1), 0); del buf172 # reuse
        # Topologically Sorted Source Nodes: [matmul_33], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_3.run(buf170, buf176, 16, grid=grid(16), stream=stream0)
        buf177 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_33], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf175, buf176, out=buf177)
        buf188 = reinterpret_tensor(buf176, (4, 4, 1), (4, 1, 16), 0); del buf176 # reuse
        # Topologically Sorted Source Nodes: [matmul_36], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_4.run(buf170, buf188, 16, grid=grid(16), stream=stream0)
        buf189 = reinterpret_tensor(buf175, (4, 1, 4), (4, 16, 1), 0); del buf175 # reuse
        # Topologically Sorted Source Nodes: [matmul_36], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_5.run(buf171, buf189, 16, grid=grid(16), stream=stream0)
        buf190 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_36], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf188, buf189, out=buf190)
        buf204 = reinterpret_tensor(buf189, (4, 4, 1), (4, 1, 16), 0); del buf189 # reuse
        # Topologically Sorted Source Nodes: [matmul_40], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_6.run(buf170, buf204, 16, grid=grid(16), stream=stream0)
        buf205 = reinterpret_tensor(buf188, (4, 1, 4), (4, 16, 1), 0); del buf188 # reuse
        # Topologically Sorted Source Nodes: [matmul_40], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_7.run(buf171, buf205, 16, grid=grid(16), stream=stream0)
        buf206 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_40], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf204, buf205, out=buf206)
        buf220 = reinterpret_tensor(buf205, (4, 4, 1), (4, 1, 16), 0); del buf205 # reuse
        # Topologically Sorted Source Nodes: [matmul_44], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_8.run(buf170, buf220, 16, grid=grid(16), stream=stream0)
        buf221 = reinterpret_tensor(buf204, (4, 1, 4), (4, 16, 1), 0); del buf204 # reuse
        # Topologically Sorted Source Nodes: [matmul_44], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_9.run(buf171, buf221, 16, grid=grid(16), stream=stream0)
        buf222 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_44], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf220, buf221, out=buf222)
        buf178 = reinterpret_tensor(buf221, (4, 4, 1), (4, 1, 16), 0); del buf221 # reuse
        buf179 = buf220; del buf220 # reuse
        buf194 = reinterpret_tensor(buf127, (4, 4, 1), (4, 1, 16), 0); del buf127 # reuse
        buf195 = reinterpret_tensor(buf111, (4, 4, 1), (4, 1, 16), 0); del buf111 # reuse
        buf210 = buf159; del buf159 # reuse
        buf211 = reinterpret_tensor(buf125, (4, 4, 1), (4, 1, 16), 0); del buf125 # reuse
        buf226 = reinterpret_tensor(buf109, (4, 4, 1), (4, 1, 16), 0); del buf109 # reuse
        buf227 = buf156; del buf156 # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_16, interMAF_q2v_4, masked_fill_18, interMAF_q2v_5, masked_fill_20, interMAF_q2v_6, masked_fill_22, interMAF_q2v_7], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_8, buf174, buf190, buf206, buf222, buf178, buf179, buf194, buf195, buf210, buf211, buf226, buf227, 16, grid=grid(16), stream=stream0)
        buf180 = buf174; del buf174 # reuse
        buf196 = buf190; del buf190 # reuse
        buf212 = buf206; del buf206 # reuse
        buf228 = buf222; del buf222 # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_16, interMAF_q2v_4, masked_fill_18, interMAF_q2v_5, masked_fill_20, interMAF_q2v_6, masked_fill_22, interMAF_q2v_7], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf180, buf196, buf212, buf228, primals_8, buf178, buf179, buf194, buf195, buf210, buf211, buf226, buf227, 64, grid=grid(64), stream=stream0)
        buf191 = buf227; del buf227 # reuse
        # Topologically Sorted Source Nodes: [matmul_37], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_4.run(buf171, buf191, 16, grid=grid(16), stream=stream0)
        buf192 = reinterpret_tensor(buf226, (4, 1, 4), (4, 16, 1), 0); del buf226 # reuse
        # Topologically Sorted Source Nodes: [matmul_37], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_5.run(buf170, buf192, 16, grid=grid(16), stream=stream0)
        buf193 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_37], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf191, buf192, out=buf193)
        buf207 = reinterpret_tensor(buf192, (4, 4, 1), (4, 1, 16), 0); del buf192 # reuse
        # Topologically Sorted Source Nodes: [matmul_41], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_6.run(buf171, buf207, 16, grid=grid(16), stream=stream0)
        buf208 = reinterpret_tensor(buf191, (4, 1, 4), (4, 16, 1), 0); del buf191 # reuse
        # Topologically Sorted Source Nodes: [matmul_41], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_7.run(buf170, buf208, 16, grid=grid(16), stream=stream0)
        buf209 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_41], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf207, buf208, out=buf209)
        buf181 = reinterpret_tensor(buf208, (4, 4, 1), (4, 1, 16), 0); del buf208 # reuse
        buf182 = buf207; del buf207 # reuse
        buf197 = buf211; del buf211 # reuse
        buf198 = buf210; del buf210 # reuse
        buf213 = buf195; del buf195 # reuse
        buf214 = buf194; del buf194 # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_17, interMAF_v2q_4, masked_fill_19, interMAF_v2q_5, masked_fill_21, interMAF_v2q_6], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_12.run(primals_7, buf177, buf193, buf209, buf181, buf182, buf197, buf198, buf213, buf214, 16, grid=grid(16), stream=stream0)
        buf223 = buf179; del buf179 # reuse
        # Topologically Sorted Source Nodes: [matmul_45], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_8.run(buf171, buf223, 16, grid=grid(16), stream=stream0)
        buf224 = reinterpret_tensor(buf178, (4, 1, 4), (4, 16, 1), 0); del buf178 # reuse
        # Topologically Sorted Source Nodes: [matmul_45], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_9.run(buf170, buf224, 16, grid=grid(16), stream=stream0)
        buf225 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_45], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf223, buf224, out=buf225)
        buf184 = reinterpret_tensor(buf224, (4, 4, 1), (4, 1, 16), 0); del buf224 # reuse
        # Topologically Sorted Source Nodes: [v_update_8], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf171, buf184, 16, grid=grid(16), stream=stream0)
        buf185 = reinterpret_tensor(buf223, (4, 4, 1), (4, 1, 1), 0); del buf223 # reuse
        # Topologically Sorted Source Nodes: [v_update_8], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf180, buf184, out=buf185)
        buf200 = buf184; del buf184 # reuse
        # Topologically Sorted Source Nodes: [matmul_38], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf171, buf200, 16, grid=grid(16), stream=stream0)
        buf201 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_38], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf196, buf200, out=buf201)
        buf216 = buf200; del buf200 # reuse
        # Topologically Sorted Source Nodes: [matmul_42], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf171, buf216, 16, grid=grid(16), stream=stream0)
        buf217 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_42], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf212, buf216, out=buf217)
        buf232 = buf216; del buf216 # reuse
        # Topologically Sorted Source Nodes: [matmul_46], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf171, buf232, 16, grid=grid(16), stream=stream0)
        buf233 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_46], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf228, buf232, out=buf233)
        buf239 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        buf234 = reinterpret_tensor(buf239, (4, 4, 4), (32, 8, 1), 4) # alias
        # Topologically Sorted Source Nodes: [v_update_11], Original ATen: [aten.cat]
        triton_poi_fused_cat_17.run(buf185, buf201, buf217, buf233, buf234, 64, grid=grid(64), stream=stream0)
        buf238 = reinterpret_tensor(buf239, (4, 4, 4), (32, 8, 1), 0) # alias
        # Topologically Sorted Source Nodes: [cat_v_1], Original ATen: [aten.cat]
        triton_poi_fused_cat_18.run(buf163, buf238, 64, grid=grid(64), stream=stream0)
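        # --- Editorial note -------------------------------------------------
        # buf234 and buf238 are aliased halves of the preallocated (4, 4, 8)
        # buffer buf239, so the two cat kernels above realize
        # torch.cat([cat_v_1, v_update_11], dim=2) by writing each operand
        # directly into its half of the output instead of materializing the
        # operands first. Roughly (names illustrative, never called here):
        def _illustrative_inplace_cat(left, right):
            out = torch.empty(left.size(0), left.size(1), left.size(2) + right.size(2), dtype=left.dtype, device=left.device)
            out[..., :left.size(2)] = left   # plays the role of the offset-0 alias
            out[..., left.size(2):] = right  # plays the role of the offset-4 alias
            return out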
buf242 = buf163; del buf163 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf239, (16, 8), (8, 1), 0), reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), out=buf242) buf229 = reinterpret_tensor(buf233, (4, 4, 1), (4, 1, 16), 0); del buf233 # reuse buf230 = reinterpret_tensor(buf217, (4, 4, 1), (4, 1, 16), 0); del buf217 # reuse buf244 = reinterpret_tensor(buf201, (4, 4), (4, 1), 0); del buf201 # reuse buf246 = buf244; del buf244 # reuse # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_23, interMAF_v2q_7, mul_12, sum_5, v_mean_1, relu_8], Original ATen: [aten.masked_fill, aten.eq, aten._softmax, aten.mul, aten.sum, aten.div, aten.relu] triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19.run(buf246, primals_7, buf225, buf242, primals_14, buf229, buf230, 16, grid=grid(16), stream=stream0) buf183 = buf177; del buf177 # reuse buf199 = buf193; del buf193 # reuse buf215 = buf209; del buf209 # reuse buf231 = buf225; del buf225 # reuse # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_17, interMAF_v2q_4, masked_fill_19, interMAF_v2q_5, masked_fill_21, interMAF_v2q_6, masked_fill_23, interMAF_v2q_7], Original ATen: [aten.masked_fill, aten.eq, aten._softmax] triton_poi_fused__softmax_eq_masked_fill_11.run(buf183, buf199, buf215, buf231, primals_7, buf181, buf182, buf197, buf198, buf213, buf214, buf229, buf230, 64, grid=grid(64), stream=stream0) buf186 = buf230; del buf230 # reuse # Topologically Sorted Source Nodes: [q_update_8], Original ATen: [aten.bmm] triton_poi_fused_bmm_13.run(buf170, buf186, 16, grid=grid(16), stream=stream0) buf187 = reinterpret_tensor(buf229, (4, 4, 1), (4, 1, 1), 0); del buf229 # reuse # Topologically Sorted Source Nodes: [q_update_8], Original ATen: [aten.bmm] extern_kernels.bmm(buf183, buf186, out=buf187) buf202 = buf186; del buf186 # reuse # Topologically Sorted Source Nodes: [matmul_39], Original ATen: [aten.bmm] triton_poi_fused_bmm_14.run(buf170, buf202, 16, grid=grid(16), stream=stream0) buf203 = reinterpret_tensor(buf214, (4, 4, 1), (4, 1, 1), 0); del buf214 # reuse # Topologically Sorted Source Nodes: [matmul_39], Original ATen: [aten.bmm] extern_kernels.bmm(buf199, buf202, out=buf203) buf218 = buf202; del buf202 # reuse # Topologically Sorted Source Nodes: [matmul_43], Original ATen: [aten.bmm] triton_poi_fused_bmm_15.run(buf170, buf218, 16, grid=grid(16), stream=stream0) buf219 = reinterpret_tensor(buf213, (4, 4, 1), (4, 1, 1), 0); del buf213 # reuse # Topologically Sorted Source Nodes: [matmul_43], Original ATen: [aten.bmm] extern_kernels.bmm(buf215, buf218, out=buf219) buf235 = buf218; del buf218 # reuse # Topologically Sorted Source Nodes: [matmul_47], Original ATen: [aten.bmm] triton_poi_fused_bmm_16.run(buf170, buf235, 16, grid=grid(16), stream=stream0) buf236 = reinterpret_tensor(buf198, (4, 4, 1), (4, 1, 1), 0); del buf198 # reuse # Topologically Sorted Source Nodes: [matmul_47], Original ATen: [aten.bmm] extern_kernels.bmm(buf231, buf235, out=buf236) buf241 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf237 = reinterpret_tensor(buf241, (4, 4, 4), (32, 8, 1), 4) # alias # Topologically Sorted Source Nodes: [q_update_11], Original ATen: [aten.cat] triton_poi_fused_cat_17.run(buf187, buf203, buf219, buf236, buf237, 64, grid=grid(64), stream=stream0) buf240 = reinterpret_tensor(buf241, (4, 4, 4), (32, 8, 1), 0) # alias # Topologically Sorted Source Nodes: [cat_q_1], Original ATen: [aten.cat] triton_poi_fused_cat_18.run(buf165, buf240, 64, 
grid=grid(64), stream=stream0) buf243 = buf165; del buf165 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf241, (16, 8), (8, 1), 0), reinterpret_tensor(primals_15, (8, 4), (1, 8), 0), out=buf243) buf245 = reinterpret_tensor(buf236, (4, 4), (4, 1), 0); del buf236 # reuse buf248 = buf245; del buf245 # reuse # Topologically Sorted Source Nodes: [mul_13, sum_7, q_mean_1, relu_9], Original ATen: [aten.mul, aten.sum, aten.div, aten.relu] triton_poi_fused_div_mul_relu_sum_20.run(buf248, buf243, primals_16, primals_8, 16, grid=grid(16), stream=stream0) buf247 = reinterpret_tensor(buf219, (4, 4), (4, 1), 0); del buf219 # reuse # Topologically Sorted Source Nodes: [linear_16], Original ATen: [aten.addmm] extern_kernels.addmm(primals_18, buf246, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf247) buf249 = reinterpret_tensor(buf203, (4, 4), (4, 1), 0); del buf203 # reuse # Topologically Sorted Source Nodes: [linear_17], Original ATen: [aten.addmm] extern_kernels.addmm(primals_20, buf248, reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf249) buf250 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf667 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu_10], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_21.run(buf242, primals_14, buf250, buf667, 64, grid=grid(64), stream=stream0) buf251 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf250, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 12), (1, 4), 0), out=buf251) buf252 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf666 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu_11], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_21.run(buf243, primals_16, buf252, buf666, 64, grid=grid(64), stream=stream0) buf253 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf252, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf253) buf254 = reinterpret_tensor(buf251, (4, 4, 12), (48, 12, 1), 0); del buf251 # reuse # Topologically Sorted Source Nodes: [v_trans_7], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(buf254, primals_22, primals_7, 192, grid=grid(192), stream=stream0) buf255 = reinterpret_tensor(buf253, (4, 4, 12), (48, 12, 1), 0); del buf253 # reuse # Topologically Sorted Source Nodes: [q_trans_7], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(buf255, primals_24, primals_8, 192, grid=grid(192), stream=stream0) buf256 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf257 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_6, new_vq_1, new_vk_1], Original ATen: [aten.add, aten.mul] triton_poi_fused_add_mul_22.run(buf249, buf254, buf256, buf257, 64, grid=grid(64), stream=stream0) buf258 = reinterpret_tensor(buf187, (4, 4, 1), (4, 1, 16), 0); del buf187 # reuse # Topologically Sorted Source Nodes: [matmul_48], Original ATen: [aten.bmm] triton_poi_fused_bmm_23.run(buf256, buf258, 16, grid=grid(16), stream=stream0) buf259 = reinterpret_tensor(buf235, (4, 1, 4), (4, 16, 1), 0); del buf235 # reuse # Topologically Sorted Source Nodes: 
[matmul_48], Original ATen: [aten.bmm] triton_poi_fused_bmm_23.run(buf257, buf259, 16, grid=grid(16), stream=stream0) buf260 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_48], Original ATen: [aten.bmm] extern_kernels.bmm(buf258, buf259, out=buf260) buf261 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf262 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_8, new_qq_1, new_qk_1], Original ATen: [aten.add, aten.mul] triton_poi_fused_add_mul_22.run(buf247, buf255, buf261, buf262, 64, grid=grid(64), stream=stream0) buf263 = reinterpret_tensor(buf259, (4, 4, 1), (4, 1, 16), 0); del buf259 # reuse # Topologically Sorted Source Nodes: [matmul_49], Original ATen: [aten.bmm] triton_poi_fused_bmm_23.run(buf261, buf263, 16, grid=grid(16), stream=stream0) buf264 = reinterpret_tensor(buf258, (4, 1, 4), (4, 16, 1), 0); del buf258 # reuse # Topologically Sorted Source Nodes: [matmul_49], Original ATen: [aten.bmm] triton_poi_fused_bmm_23.run(buf262, buf264, 16, grid=grid(16), stream=stream0) buf265 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_49], Original ATen: [aten.bmm] extern_kernels.bmm(buf263, buf264, out=buf265) buf276 = reinterpret_tensor(buf264, (4, 4, 1), (4, 1, 16), 0); del buf264 # reuse # Topologically Sorted Source Nodes: [matmul_52], Original ATen: [aten.bmm] triton_poi_fused_bmm_24.run(buf256, buf276, 16, grid=grid(16), stream=stream0) buf277 = reinterpret_tensor(buf263, (4, 1, 4), (4, 16, 1), 0); del buf263 # reuse # Topologically Sorted Source Nodes: [matmul_52], Original ATen: [aten.bmm] triton_poi_fused_bmm_24.run(buf257, buf277, 16, grid=grid(16), stream=stream0) buf278 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_52], Original ATen: [aten.bmm] extern_kernels.bmm(buf276, buf277, out=buf278) buf292 = reinterpret_tensor(buf277, (4, 4, 1), (4, 1, 16), 0); del buf277 # reuse # Topologically Sorted Source Nodes: [matmul_56], Original ATen: [aten.bmm] triton_poi_fused_bmm_25.run(buf256, buf292, 16, grid=grid(16), stream=stream0) buf293 = reinterpret_tensor(buf276, (4, 1, 4), (4, 16, 1), 0); del buf276 # reuse # Topologically Sorted Source Nodes: [matmul_56], Original ATen: [aten.bmm] triton_poi_fused_bmm_25.run(buf257, buf293, 16, grid=grid(16), stream=stream0) buf294 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_56], Original ATen: [aten.bmm] extern_kernels.bmm(buf292, buf293, out=buf294) buf308 = reinterpret_tensor(buf293, (4, 4, 1), (4, 1, 16), 0); del buf293 # reuse # Topologically Sorted Source Nodes: [matmul_60], Original ATen: [aten.bmm] triton_poi_fused_bmm_26.run(buf256, buf308, 16, grid=grid(16), stream=stream0) buf309 = reinterpret_tensor(buf292, (4, 1, 4), (4, 16, 1), 0); del buf292 # reuse # Topologically Sorted Source Nodes: [matmul_60], Original ATen: [aten.bmm] triton_poi_fused_bmm_26.run(buf257, buf309, 16, grid=grid(16), stream=stream0) buf310 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_60], Original ATen: [aten.bmm] extern_kernels.bmm(buf308, buf309, out=buf310) buf266 = reinterpret_tensor(buf309, (4, 4, 1), (4, 1, 16), 0); del buf309 # reuse buf267 = buf308; del buf308 # reuse buf282 = buf197; del buf197 # reuse buf283 = buf182; del buf182 # reuse buf298 = buf181; del buf181 # reuse buf299 = reinterpret_tensor(buf185, 
        buf266 = reinterpret_tensor(buf309, (4, 4, 1), (4, 1, 16), 0); del buf309  # reuse
        buf267 = buf308; del buf308  # reuse
        buf282 = buf197; del buf197  # reuse
        buf283 = buf182; del buf182  # reuse
        buf298 = buf181; del buf181  # reuse
        buf299 = reinterpret_tensor(buf185, (4, 4, 1), (4, 1, 16), 0); del buf185  # reuse
        buf314 = buf232; del buf232  # reuse
        buf315 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_24, dyIntraMAF_v2v_4, masked_fill_26, dyIntraMAF_v2v_5, masked_fill_28, dyIntraMAF_v2v_6, masked_fill_30, dyIntraMAF_v2v_7], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_7, buf260, buf278, buf294, buf310, buf266, buf267, buf282, buf283, buf298, buf299, buf314, buf315, 16, grid=grid(16), stream=stream0)
        buf268 = buf260; del buf260  # reuse
        buf284 = buf278; del buf278  # reuse
        buf300 = buf294; del buf294  # reuse
        buf316 = buf310; del buf310  # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_24, dyIntraMAF_v2v_4, masked_fill_26, dyIntraMAF_v2v_5, masked_fill_28, dyIntraMAF_v2v_6, masked_fill_30, dyIntraMAF_v2v_7], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf268, buf284, buf300, buf316, primals_7, buf266, buf267, buf282, buf283, buf298, buf299, buf314, buf315, 64, grid=grid(64), stream=stream0)
        buf279 = buf315; del buf315  # reuse
        # Topologically Sorted Source Nodes: [matmul_53], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf261, buf279, 16, grid=grid(16), stream=stream0)
        buf280 = reinterpret_tensor(buf314, (4, 1, 4), (4, 16, 1), 0); del buf314  # reuse
        # Topologically Sorted Source Nodes: [matmul_53], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf262, buf280, 16, grid=grid(16), stream=stream0)
        buf281 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_53], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf279, buf280, out=buf281)
        buf295 = reinterpret_tensor(buf280, (4, 4, 1), (4, 1, 16), 0); del buf280  # reuse
        # Topologically Sorted Source Nodes: [matmul_57], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf261, buf295, 16, grid=grid(16), stream=stream0)
        buf296 = reinterpret_tensor(buf279, (4, 1, 4), (4, 16, 1), 0); del buf279  # reuse
        # Topologically Sorted Source Nodes: [matmul_57], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf262, buf296, 16, grid=grid(16), stream=stream0)
        buf297 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_57], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf295, buf296, out=buf297)
        buf311 = reinterpret_tensor(buf296, (4, 4, 1), (4, 1, 16), 0); del buf296  # reuse
        # Topologically Sorted Source Nodes: [matmul_61], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf261, buf311, 16, grid=grid(16), stream=stream0)
        buf312 = reinterpret_tensor(buf295, (4, 1, 4), (4, 16, 1), 0); del buf295  # reuse
        # Topologically Sorted Source Nodes: [matmul_61], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf262, buf312, 16, grid=grid(16), stream=stream0)
        buf313 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_61], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf311, buf312, out=buf313)
        buf269 = reinterpret_tensor(buf312, (4, 4, 1), (4, 1, 16), 0); del buf312  # reuse
        buf270 = buf311; del buf311  # reuse
        buf285 = buf299; del buf299  # reuse
        buf286 = buf298; del buf298  # reuse
        buf301 = buf283; del buf283  # reuse
        buf302 = buf282; del buf282  # reuse
        buf317 = buf267; del buf267  # reuse
        buf318 = buf266; del buf266  # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_25, dyIntraMAF_q2q_4, masked_fill_27, dyIntraMAF_q2q_5, masked_fill_29, dyIntraMAF_q2q_6, masked_fill_31, dyIntraMAF_q2q_7], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_8, buf265, buf281, buf297, buf313, buf269, buf270, buf285, buf286, buf301, buf302, buf317, buf318, 16, grid=grid(16), stream=stream0)
        buf271 = buf265; del buf265  # reuse
        buf287 = buf281; del buf281  # reuse
        buf303 = buf297; del buf297  # reuse
        buf319 = buf313; del buf313  # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_25, dyIntraMAF_q2q_4, masked_fill_27, dyIntraMAF_q2q_5, masked_fill_29, dyIntraMAF_q2q_6, masked_fill_31, dyIntraMAF_q2q_7], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf271, buf287, buf303, buf319, primals_8, buf269, buf270, buf285, buf286, buf301, buf302, buf317, buf318, 64, grid=grid(64), stream=stream0)
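        # The two fused kernels above implement a masked softmax over the raw
        # attention scores: positions where the mask equals 0 (aten.eq) are
        # filled with a large negative value (aten.masked_fill) before
        # normalization, so they get ~0 probability. The *_10 kernel gathers
        # the row-wise reduction statistics and the *_11 kernel normalizes in
        # place; that two-pass split is an implementation detail of the
        # generated code. A minimal eager-mode sketch of the same math
        # (illustrative only; relies on the `import torch` at the top of this
        # file):
        def _sketch_masked_softmax(scores, mask):
            # scores: (batch, seq, seq); mask: (batch, seq), 0 marks padding
            filled = scores.masked_fill(mask.unsqueeze(1) == 0, -1e9)
            return torch.softmax(filled, dim=-1)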
        buf272 = buf318; del buf318  # reuse
        # Topologically Sorted Source Nodes: [v_update_12], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf254, buf272, 16, grid=grid(16), stream=stream0)
        buf273 = reinterpret_tensor(buf317, (4, 4, 1), (4, 1, 1), 0); del buf317  # reuse
        # Topologically Sorted Source Nodes: [v_update_12], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf268, buf272, out=buf273)
        buf274 = buf272; del buf272  # reuse
        # Topologically Sorted Source Nodes: [q_update_12], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf255, buf274, 16, grid=grid(16), stream=stream0)
        buf275 = reinterpret_tensor(buf302, (4, 4, 1), (4, 1, 1), 0); del buf302  # reuse
        # Topologically Sorted Source Nodes: [q_update_12], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf271, buf274, out=buf275)
        buf288 = buf274; del buf274  # reuse
        # Topologically Sorted Source Nodes: [matmul_54], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf254, buf288, 16, grid=grid(16), stream=stream0)
        buf289 = reinterpret_tensor(buf301, (4, 4, 1), (4, 1, 1), 0); del buf301  # reuse
        # Topologically Sorted Source Nodes: [matmul_54], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf284, buf288, out=buf289)
        buf290 = buf288; del buf288  # reuse
        # Topologically Sorted Source Nodes: [matmul_55], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf255, buf290, 16, grid=grid(16), stream=stream0)
        buf291 = reinterpret_tensor(buf286, (4, 4, 1), (4, 1, 1), 0); del buf286  # reuse
        # Topologically Sorted Source Nodes: [matmul_55], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf287, buf290, out=buf291)
        buf304 = buf290; del buf290  # reuse
        # Topologically Sorted Source Nodes: [matmul_58], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf254, buf304, 16, grid=grid(16), stream=stream0)
        buf305 = reinterpret_tensor(buf285, (4, 4, 1), (4, 1, 1), 0); del buf285  # reuse
        # Topologically Sorted Source Nodes: [matmul_58], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf300, buf304, out=buf305)
        buf306 = buf304; del buf304  # reuse
        # Topologically Sorted Source Nodes: [matmul_59], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf255, buf306, 16, grid=grid(16), stream=stream0)
        buf307 = reinterpret_tensor(buf270, (4, 4, 1), (4, 1, 1), 0); del buf270  # reuse
        # Topologically Sorted Source Nodes: [matmul_59], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf303, buf306, out=buf307)
        buf320 = buf306; del buf306  # reuse
        # Topologically Sorted Source Nodes: [matmul_62], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf254, buf320, 16, grid=grid(16), stream=stream0)
        buf321 = reinterpret_tensor(buf269, (4, 4, 1), (4, 1, 1), 0); del buf269  # reuse
        # Topologically Sorted Source Nodes: [matmul_62], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf316, buf320, out=buf321)
        buf326 = reinterpret_tensor(buf242, (4, 4, 4), (16, 4, 1), 0); del buf242  # reuse
        # Topologically Sorted Source Nodes: [v_update_15, add_10], Original ATen: [aten.cat, aten.add]
        triton_poi_fused_add_cat_28.run(buf326, buf273, buf289, buf305, buf321, primals_14, 64, grid=grid(64), stream=stream0)
        buf323 = reinterpret_tensor(buf321, (4, 4, 1), (4, 1, 16), 0); del buf321  # reuse
        # Topologically Sorted Source Nodes: [matmul_63], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf255, buf323, 16, grid=grid(16), stream=stream0)
        buf324 = buf305; del buf305  # reuse
        # Topologically Sorted Source Nodes: [matmul_63], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf319, buf323, out=buf324)
        buf328 = reinterpret_tensor(buf243, (4, 4, 4), (16, 4, 1), 0); del buf243  # reuse
        # Topologically Sorted Source Nodes: [q_update_15, add_11], Original ATen: [aten.cat, aten.add]
        triton_poi_fused_add_cat_28.run(buf328, buf275, buf291, buf307, buf324, primals_16, 64, grid=grid(64), stream=stream0)
        buf327 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [updated_v_3], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_26, reinterpret_tensor(buf326, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf327)
        buf329 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [updated_q_3], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_28, reinterpret_tensor(buf328, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf329)
        buf330 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf665 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_12], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf327, buf330, buf665, 64, grid=grid(64), stream=stream0)
        buf331 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf330, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf331)
        buf332 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf664 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_13], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf329, buf332, buf664, 64, grid=grid(64), stream=stream0)
        buf333 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf332, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 12), (1, 4), 0), out=buf333)
        buf334 = reinterpret_tensor(buf331, (4, 4, 12), (48, 12, 1), 0); del buf331  # reuse
        # Topologically Sorted Source Nodes: [v_trans_9], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf334, primals_10, primals_7, 192, grid=grid(192), stream=stream0)
        buf335 = reinterpret_tensor(buf333, (4, 4, 12), (48, 12, 1), 0); del buf333  # reuse
        # Topologically Sorted Source Nodes: [q_trans_9], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf335, primals_12, primals_8, 192, grid=grid(192), stream=stream0)
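        # The extern_kernels.addmm calls above (updated_v_3 / updated_q_3)
        # fuse the bias add into a single GEMM: out = beta * bias +
        # alpha * (x @ W^T), which is how a torch.nn.Linear layer is lowered.
        # A minimal equivalent (illustrative only; relies on the
        # `import torch` at the top of this file):
        def _sketch_linear_via_addmm(x, weight, bias):
            # x: (tokens, in_features); weight: (out_features, in_features)
            return torch.addmm(bias, x, weight.t(), beta=1, alpha=1)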
        buf336 = reinterpret_tensor(buf324, (4, 4, 1), (4, 1, 16), 0); del buf324  # reuse
        # Topologically Sorted Source Nodes: [matmul_64], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_2.run(buf334, buf336, 16, grid=grid(16), stream=stream0)
        buf337 = reinterpret_tensor(buf307, (4, 1, 4), (4, 16, 1), 0); del buf307  # reuse
        # Topologically Sorted Source Nodes: [matmul_64], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_3.run(buf335, buf337, 16, grid=grid(16), stream=stream0)
        buf338 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_64], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf336, buf337, out=buf338)
        buf339 = reinterpret_tensor(buf337, (4, 4, 1), (4, 1, 16), 0); del buf337  # reuse
        # Topologically Sorted Source Nodes: [matmul_65], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_2.run(buf335, buf339, 16, grid=grid(16), stream=stream0)
        buf340 = reinterpret_tensor(buf336, (4, 1, 4), (4, 16, 1), 0); del buf336  # reuse
        # Topologically Sorted Source Nodes: [matmul_65], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_3.run(buf334, buf340, 16, grid=grid(16), stream=stream0)
        buf341 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_65], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf339, buf340, out=buf341)
        buf352 = reinterpret_tensor(buf340, (4, 4, 1), (4, 1, 16), 0); del buf340  # reuse
        # Topologically Sorted Source Nodes: [matmul_68], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_4.run(buf334, buf352, 16, grid=grid(16), stream=stream0)
        buf353 = reinterpret_tensor(buf339, (4, 1, 4), (4, 16, 1), 0); del buf339  # reuse
        # Topologically Sorted Source Nodes: [matmul_68], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_5.run(buf335, buf353, 16, grid=grid(16), stream=stream0)
        buf354 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_68], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf352, buf353, out=buf354)
        buf368 = reinterpret_tensor(buf353, (4, 4, 1), (4, 1, 16), 0); del buf353  # reuse
        # Topologically Sorted Source Nodes: [matmul_72], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_6.run(buf334, buf368, 16, grid=grid(16), stream=stream0)
        buf369 = reinterpret_tensor(buf352, (4, 1, 4), (4, 16, 1), 0); del buf352  # reuse
        # Topologically Sorted Source Nodes: [matmul_72], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_7.run(buf335, buf369, 16, grid=grid(16), stream=stream0)
        buf370 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_72], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf368, buf369, out=buf370)
        buf384 = reinterpret_tensor(buf369, (4, 4, 1), (4, 1, 16), 0); del buf369  # reuse
        # Topologically Sorted Source Nodes: [matmul_76], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_8.run(buf334, buf384, 16, grid=grid(16), stream=stream0)
        buf385 = reinterpret_tensor(buf368, (4, 1, 4), (4, 16, 1), 0); del buf368  # reuse
        # Topologically Sorted Source Nodes: [matmul_76], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_9.run(buf335, buf385, 16, grid=grid(16), stream=stream0)
        buf386 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_76], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf384, buf385, out=buf386)
        buf342 = reinterpret_tensor(buf385, (4, 4, 1), (4, 1, 16), 0); del buf385  # reuse
        buf343 = buf384; del buf384  # reuse
        buf358 = reinterpret_tensor(buf291, (4, 4, 1), (4, 1, 16), 0); del buf291  # reuse
        buf359 = reinterpret_tensor(buf275, (4, 4, 1), (4, 1, 16), 0); del buf275  # reuse
        buf374 = buf323; del buf323  # reuse
        buf375 = reinterpret_tensor(buf289, (4, 4, 1), (4, 1, 16), 0); del buf289  # reuse
        buf390 = reinterpret_tensor(buf273, (4, 4, 1), (4, 1, 16), 0); del buf273  # reuse
        buf391 = buf320; del buf320  # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_32, interMAF_q2v_8, masked_fill_34, interMAF_q2v_9, masked_fill_36, interMAF_q2v_10, masked_fill_38, interMAF_q2v_11], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_8, buf338, buf354, buf370, buf386, buf342, buf343, buf358, buf359, buf374, buf375, buf390, buf391, 16, grid=grid(16), stream=stream0)
        buf344 = buf338; del buf338  # reuse
        buf360 = buf354; del buf354  # reuse
        buf376 = buf370; del buf370  # reuse
        buf392 = buf386; del buf386  # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_32, interMAF_q2v_8, masked_fill_34, interMAF_q2v_9, masked_fill_36, interMAF_q2v_10, masked_fill_38, interMAF_q2v_11], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf344, buf360, buf376, buf392, primals_8, buf342, buf343, buf358, buf359, buf374, buf375, buf390, buf391, 64, grid=grid(64), stream=stream0)
        buf355 = buf391; del buf391  # reuse
        # Topologically Sorted Source Nodes: [matmul_69], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_4.run(buf335, buf355, 16, grid=grid(16), stream=stream0)
        buf356 = reinterpret_tensor(buf390, (4, 1, 4), (4, 16, 1), 0); del buf390  # reuse
        # Topologically Sorted Source Nodes: [matmul_69], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_5.run(buf334, buf356, 16, grid=grid(16), stream=stream0)
        buf357 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_69], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf355, buf356, out=buf357)
        buf371 = reinterpret_tensor(buf356, (4, 4, 1), (4, 1, 16), 0); del buf356  # reuse
        # Topologically Sorted Source Nodes: [matmul_73], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_6.run(buf335, buf371, 16, grid=grid(16), stream=stream0)
        buf372 = reinterpret_tensor(buf355, (4, 1, 4), (4, 16, 1), 0); del buf355  # reuse
        # Topologically Sorted Source Nodes: [matmul_73], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_7.run(buf334, buf372, 16, grid=grid(16), stream=stream0)
        buf373 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_73], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf371, buf372, out=buf373)
        buf345 = reinterpret_tensor(buf372, (4, 4, 1), (4, 1, 16), 0); del buf372  # reuse
        buf346 = buf371; del buf371  # reuse
        buf361 = buf375; del buf375  # reuse
        buf362 = buf374; del buf374  # reuse
        buf377 = buf359; del buf359  # reuse
        buf378 = buf358; del buf358  # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_33, interMAF_v2q_8, masked_fill_35, interMAF_v2q_9, masked_fill_37, interMAF_v2q_10], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_12.run(primals_7, buf341, buf357, buf373, buf345, buf346, buf361, buf362, buf377, buf378, 16, grid=grid(16), stream=stream0)
        buf387 = buf343; del buf343  # reuse
        # Topologically Sorted Source Nodes: [matmul_77], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_8.run(buf335, buf387, 16, grid=grid(16), stream=stream0)
        buf388 = reinterpret_tensor(buf342, (4, 1, 4), (4, 16, 1), 0); del buf342  # reuse
        # Topologically Sorted Source Nodes: [matmul_77], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_9.run(buf334, buf388, 16, grid=grid(16), stream=stream0)
        buf389 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_77], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf387, buf388, out=buf389)
        buf348 = reinterpret_tensor(buf388, (4, 4, 1), (4, 1, 16), 0); del buf388  # reuse
        # Topologically Sorted Source Nodes: [v_update_16], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf335, buf348, 16, grid=grid(16), stream=stream0)
        buf349 = reinterpret_tensor(buf387, (4, 4, 1), (4, 1, 1), 0); del buf387  # reuse
        # Topologically Sorted Source Nodes: [v_update_16], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf344, buf348, out=buf349)
        buf364 = buf348; del buf348  # reuse
        # Topologically Sorted Source Nodes: [matmul_70], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf335, buf364, 16, grid=grid(16), stream=stream0)
        buf365 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_70], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf360, buf364, out=buf365)
        buf380 = buf364; del buf364  # reuse
        # Topologically Sorted Source Nodes: [matmul_74], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf335, buf380, 16, grid=grid(16), stream=stream0)
        buf381 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_74], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf376, buf380, out=buf381)
        buf396 = buf380; del buf380  # reuse
        # Topologically Sorted Source Nodes: [matmul_78], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf335, buf396, 16, grid=grid(16), stream=stream0)
        buf397 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_78], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf392, buf396, out=buf397)
        buf403 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        buf398 = reinterpret_tensor(buf403, (4, 4, 4), (32, 8, 1), 4)  # alias
        # Topologically Sorted Source Nodes: [v_update_19], Original ATen: [aten.cat]
        triton_poi_fused_cat_17.run(buf349, buf365, buf381, buf397, buf398, 64, grid=grid(64), stream=stream0)
        buf402 = reinterpret_tensor(buf403, (4, 4, 4), (32, 8, 1), 0)  # alias
        # Topologically Sorted Source Nodes: [cat_v_2], Original ATen: [aten.cat]
        triton_poi_fused_cat_18.run(buf327, buf402, 64, grid=grid(64), stream=stream0)
        buf406 = buf327; del buf327  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf403, (16, 8), (8, 1), 0), reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), out=buf406)
        buf393 = reinterpret_tensor(buf397, (4, 4, 1), (4, 1, 16), 0); del buf397  # reuse
        buf394 = reinterpret_tensor(buf381, (4, 4, 1), (4, 1, 16), 0); del buf381  # reuse
        buf408 = reinterpret_tensor(buf365, (4, 4), (4, 1), 0); del buf365  # reuse
        buf410 = buf408; del buf408  # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_39, interMAF_v2q_11, mul_22, sum_9, v_mean_2, relu_14], Original ATen: [aten.masked_fill, aten.eq, aten._softmax, aten.mul, aten.sum, aten.div, aten.relu]
        triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19.run(buf410, primals_7, buf389, buf406, primals_14, buf393, buf394, 16, grid=grid(16), stream=stream0)
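        # The fused kernel above (..._mul_relu_sum_19) also folds in a masked
        # mean pool followed by ReLU (mul_22, sum_9, v_mean_2, relu_14):
        # valid positions are summed and divided by the number of valid
        # tokens. A minimal eager-mode sketch of that reduction (illustrative
        # only; relies on the `import torch` at the top of this file):
        def _sketch_masked_mean_relu(x, mask):
            # x: (batch, seq, dim); mask: (batch, seq), 1 marks valid tokens
            summed = (x * mask.unsqueeze(-1)).sum(dim=1)        # (batch, dim)
            count = mask.sum(dim=1, keepdim=True).clamp(min=1)  # avoid 0-div
            return torch.relu(summed / count)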
        buf347 = buf341; del buf341  # reuse
        buf363 = buf357; del buf357  # reuse
        buf379 = buf373; del buf373  # reuse
        buf395 = buf389; del buf389  # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_33, interMAF_v2q_8, masked_fill_35, interMAF_v2q_9, masked_fill_37, interMAF_v2q_10, masked_fill_39, interMAF_v2q_11], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf347, buf363, buf379, buf395, primals_7, buf345, buf346, buf361, buf362, buf377, buf378, buf393, buf394, 64, grid=grid(64), stream=stream0)
        buf350 = buf394; del buf394  # reuse
        # Topologically Sorted Source Nodes: [q_update_16], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf334, buf350, 16, grid=grid(16), stream=stream0)
        buf351 = reinterpret_tensor(buf393, (4, 4, 1), (4, 1, 1), 0); del buf393  # reuse
        # Topologically Sorted Source Nodes: [q_update_16], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf347, buf350, out=buf351)
        buf366 = buf350; del buf350  # reuse
        # Topologically Sorted Source Nodes: [matmul_71], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf334, buf366, 16, grid=grid(16), stream=stream0)
        buf367 = reinterpret_tensor(buf378, (4, 4, 1), (4, 1, 1), 0); del buf378  # reuse
        # Topologically Sorted Source Nodes: [matmul_71], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf363, buf366, out=buf367)
        buf382 = buf366; del buf366  # reuse
        # Topologically Sorted Source Nodes: [matmul_75], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf334, buf382, 16, grid=grid(16), stream=stream0)
        buf383 = reinterpret_tensor(buf377, (4, 4, 1), (4, 1, 1), 0); del buf377  # reuse
        # Topologically Sorted Source Nodes: [matmul_75], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf379, buf382, out=buf383)
        buf399 = buf382; del buf382  # reuse
        # Topologically Sorted Source Nodes: [matmul_79], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf334, buf399, 16, grid=grid(16), stream=stream0)
        buf400 = reinterpret_tensor(buf362, (4, 4, 1), (4, 1, 1), 0); del buf362  # reuse
        # Topologically Sorted Source Nodes: [matmul_79], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf395, buf399, out=buf400)
        buf405 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        buf401 = reinterpret_tensor(buf405, (4, 4, 4), (32, 8, 1), 4)  # alias
        # Topologically Sorted Source Nodes: [q_update_19], Original ATen: [aten.cat]
        triton_poi_fused_cat_17.run(buf351, buf367, buf383, buf400, buf401, 64, grid=grid(64), stream=stream0)
        buf404 = reinterpret_tensor(buf405, (4, 4, 4), (32, 8, 1), 0)  # alias
        # Topologically Sorted Source Nodes: [cat_q_2], Original ATen: [aten.cat]
        triton_poi_fused_cat_18.run(buf329, buf404, 64, grid=grid(64), stream=stream0)
        buf407 = buf329; del buf329  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf405, (16, 8), (8, 1), 0), reinterpret_tensor(primals_15, (8, 4), (1, 8), 0), out=buf407)
        buf409 = reinterpret_tensor(buf400, (4, 4), (4, 1), 0); del buf400  # reuse
        buf412 = buf409; del buf409  # reuse
        # Topologically Sorted Source Nodes: [mul_23, sum_11, q_mean_2, relu_15], Original ATen: [aten.mul, aten.sum, aten.div, aten.relu]
        triton_poi_fused_div_mul_relu_sum_20.run(buf412, buf407, primals_16, primals_8, 16, grid=grid(16), stream=stream0)
        buf411 = reinterpret_tensor(buf383, (4, 4), (4, 1), 0); del buf383  # reuse
        # Topologically Sorted Source Nodes: [linear_26], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_18, buf410, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf411)
        buf413 = reinterpret_tensor(buf367, (4, 4), (4, 1), 0); del buf367  # reuse
        # Topologically Sorted Source Nodes: [linear_27], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_20, buf412, reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf413)
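        # The relu/threshold_backward kernels below write the ReLU output and
        # a boolean buffer (the buf658..buf665-style allocations) recording
        # where the activation is <= 0; the backward pass uses that mask to
        # zero the corresponding gradients (aten.threshold_backward). A
        # minimal eager-mode sketch of the forward + saved-mask pattern
        # (illustrative only; relies on the `import torch` at the top of this
        # file):
        def _sketch_relu_with_backward_mask(x):
            out = torch.relu(x)
            le_mask = out <= 0  # True where the gradient must be zeroed
            return out, le_mask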
        buf414 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf663 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_16], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_21.run(buf406, primals_14, buf414, buf663, 64, grid=grid(64), stream=stream0)
        buf415 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf414, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 12), (1, 4), 0), out=buf415)
        buf416 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf662 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_17], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_21.run(buf407, primals_16, buf416, buf662, 64, grid=grid(64), stream=stream0)
        buf417 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf416, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf417)
        buf418 = reinterpret_tensor(buf415, (4, 4, 12), (48, 12, 1), 0); del buf415  # reuse
        # Topologically Sorted Source Nodes: [v_trans_11], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf418, primals_22, primals_7, 192, grid=grid(192), stream=stream0)
        buf419 = reinterpret_tensor(buf417, (4, 4, 12), (48, 12, 1), 0); del buf417  # reuse
        # Topologically Sorted Source Nodes: [q_trans_11], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf419, primals_24, primals_8, 192, grid=grid(192), stream=stream0)
        buf420 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf421 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add_12, new_vq_2, new_vk_2], Original ATen: [aten.add, aten.mul]
        triton_poi_fused_add_mul_22.run(buf413, buf418, buf420, buf421, 64, grid=grid(64), stream=stream0)
        buf422 = reinterpret_tensor(buf351, (4, 4, 1), (4, 1, 16), 0); del buf351  # reuse
        # Topologically Sorted Source Nodes: [matmul_80], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf420, buf422, 16, grid=grid(16), stream=stream0)
        buf423 = reinterpret_tensor(buf399, (4, 1, 4), (4, 16, 1), 0); del buf399  # reuse
        # Topologically Sorted Source Nodes: [matmul_80], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf421, buf423, 16, grid=grid(16), stream=stream0)
        buf424 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_80], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf422, buf423, out=buf424)
        buf425 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf426 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add_14, new_qq_2, new_qk_2], Original ATen: [aten.add, aten.mul]
        triton_poi_fused_add_mul_22.run(buf411, buf419, buf425, buf426, 64, grid=grid(64), stream=stream0)
        buf427 = reinterpret_tensor(buf423, (4, 4, 1), (4, 1, 16), 0); del buf423  # reuse
        # Topologically Sorted Source Nodes: [matmul_81], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf425, buf427, 16, grid=grid(16), stream=stream0)
        buf428 = reinterpret_tensor(buf422, (4, 1, 4), (4, 16, 1), 0); del buf422  # reuse
        # Topologically Sorted Source Nodes: [matmul_81], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf426, buf428, 16, grid=grid(16), stream=stream0)
        buf429 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_81], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf427, buf428, out=buf429)
        buf440 = reinterpret_tensor(buf428, (4, 4, 1), (4, 1, 16), 0); del buf428  # reuse
        # Topologically Sorted Source Nodes: [matmul_84], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf420, buf440, 16, grid=grid(16), stream=stream0)
        buf441 = reinterpret_tensor(buf427, (4, 1, 4), (4, 16, 1), 0); del buf427  # reuse
        # Topologically Sorted Source Nodes: [matmul_84], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf421, buf441, 16, grid=grid(16), stream=stream0)
        buf442 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_84], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf440, buf441, out=buf442)
        buf456 = reinterpret_tensor(buf441, (4, 4, 1), (4, 1, 16), 0); del buf441  # reuse
        # Topologically Sorted Source Nodes: [matmul_88], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf420, buf456, 16, grid=grid(16), stream=stream0)
        buf457 = reinterpret_tensor(buf440, (4, 1, 4), (4, 16, 1), 0); del buf440  # reuse
        # Topologically Sorted Source Nodes: [matmul_88], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf421, buf457, 16, grid=grid(16), stream=stream0)
        buf458 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_88], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf456, buf457, out=buf458)
        buf472 = reinterpret_tensor(buf457, (4, 4, 1), (4, 1, 16), 0); del buf457  # reuse
        # Topologically Sorted Source Nodes: [matmul_92], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf420, buf472, 16, grid=grid(16), stream=stream0)
        buf473 = reinterpret_tensor(buf456, (4, 1, 4), (4, 16, 1), 0); del buf456  # reuse
        # Topologically Sorted Source Nodes: [matmul_92], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf421, buf473, 16, grid=grid(16), stream=stream0)
        buf474 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_92], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf472, buf473, out=buf474)
        buf430 = reinterpret_tensor(buf473, (4, 4, 1), (4, 1, 16), 0); del buf473  # reuse
        buf431 = buf472; del buf472  # reuse
        buf446 = buf361; del buf361  # reuse
        buf447 = buf346; del buf346  # reuse
        buf462 = buf345; del buf345  # reuse
        buf463 = reinterpret_tensor(buf349, (4, 4, 1), (4, 1, 16), 0); del buf349  # reuse
        buf478 = buf396; del buf396  # reuse
        buf479 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_40, dyIntraMAF_v2v_8, masked_fill_42, dyIntraMAF_v2v_9, masked_fill_44, dyIntraMAF_v2v_10, masked_fill_46, dyIntraMAF_v2v_11], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_7, buf424, buf442, buf458, buf474, buf430, buf431, buf446, buf447, buf462, buf463, buf478, buf479, 16, grid=grid(16), stream=stream0)
        buf432 = buf424; del buf424  # reuse
        buf448 = buf442; del buf442  # reuse
        buf464 = buf458; del buf458  # reuse
        buf480 = buf474; del buf474  # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_40, dyIntraMAF_v2v_8, masked_fill_42, dyIntraMAF_v2v_9, masked_fill_44, dyIntraMAF_v2v_10, masked_fill_46, dyIntraMAF_v2v_11], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf432, buf448, buf464, buf480, primals_7, buf430, buf431, buf446, buf447, buf462, buf463, buf478, buf479, 64, grid=grid(64), stream=stream0)
        buf443 = buf479; del buf479  # reuse
        # Topologically Sorted Source Nodes: [matmul_85], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf425, buf443, 16, grid=grid(16), stream=stream0)
        buf444 = reinterpret_tensor(buf478, (4, 1, 4), (4, 16, 1), 0); del buf478  # reuse
        # Topologically Sorted Source Nodes: [matmul_85], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf426, buf444, 16, grid=grid(16), stream=stream0)
        buf445 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_85], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf443, buf444, out=buf445)
        buf459 = reinterpret_tensor(buf444, (4, 4, 1), (4, 1, 16), 0); del buf444  # reuse
        # Topologically Sorted Source Nodes: [matmul_89], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf425, buf459, 16, grid=grid(16), stream=stream0)
        buf460 = reinterpret_tensor(buf443, (4, 1, 4), (4, 16, 1), 0); del buf443  # reuse
        # Topologically Sorted Source Nodes: [matmul_89], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf426, buf460, 16, grid=grid(16), stream=stream0)
        buf461 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_89], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf459, buf460, out=buf461)
        buf475 = reinterpret_tensor(buf460, (4, 4, 1), (4, 1, 16), 0); del buf460  # reuse
        # Topologically Sorted Source Nodes: [matmul_93], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf425, buf475, 16, grid=grid(16), stream=stream0)
        buf476 = reinterpret_tensor(buf459, (4, 1, 4), (4, 16, 1), 0); del buf459  # reuse
        # Topologically Sorted Source Nodes: [matmul_93], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf426, buf476, 16, grid=grid(16), stream=stream0)
        buf477 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_93], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf475, buf476, out=buf477)
        buf433 = reinterpret_tensor(buf476, (4, 4, 1), (4, 1, 16), 0); del buf476  # reuse
        buf434 = buf475; del buf475  # reuse
        buf449 = buf463; del buf463  # reuse
        buf450 = buf462; del buf462  # reuse
        buf465 = buf447; del buf447  # reuse
        buf466 = buf446; del buf446  # reuse
        buf481 = buf431; del buf431  # reuse
        buf482 = buf430; del buf430  # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_41, dyIntraMAF_q2q_8, masked_fill_43, dyIntraMAF_q2q_9, masked_fill_45, dyIntraMAF_q2q_10, masked_fill_47, dyIntraMAF_q2q_11], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_8, buf429, buf445, buf461, buf477, buf433, buf434, buf449, buf450, buf465, buf466, buf481, buf482, 16, grid=grid(16), stream=stream0)
        buf435 = buf429; del buf429  # reuse
        buf451 = buf445; del buf445  # reuse
        buf467 = buf461; del buf461  # reuse
        buf483 = buf477; del buf477  # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_41, dyIntraMAF_q2q_8, masked_fill_43, dyIntraMAF_q2q_9, masked_fill_45, dyIntraMAF_q2q_10, masked_fill_47, dyIntraMAF_q2q_11], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf435, buf451, buf467, buf483, primals_8, buf433, buf434, buf449, buf450, buf465, buf466, buf481, buf482, 64, grid=grid(64), stream=stream0)
        buf436 = buf482; del buf482  # reuse
        # Topologically Sorted Source Nodes: [v_update_20], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf418, buf436, 16, grid=grid(16), stream=stream0)
        buf437 = reinterpret_tensor(buf481, (4, 4, 1), (4, 1, 1), 0); del buf481  # reuse
        # Topologically Sorted Source Nodes: [v_update_20], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf432, buf436, out=buf437)
        buf438 = buf436; del buf436  # reuse
        # Topologically Sorted Source Nodes: [q_update_20], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf419, buf438, 16, grid=grid(16), stream=stream0)
        buf439 = reinterpret_tensor(buf466, (4, 4, 1), (4, 1, 1), 0); del buf466  # reuse
        # Topologically Sorted Source Nodes: [q_update_20], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf435, buf438, out=buf439)
        buf452 = buf438; del buf438  # reuse
        # Topologically Sorted Source Nodes: [matmul_86], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf418, buf452, 16, grid=grid(16), stream=stream0)
        buf453 = reinterpret_tensor(buf465, (4, 4, 1), (4, 1, 1), 0); del buf465  # reuse
        # Topologically Sorted Source Nodes: [matmul_86], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf448, buf452, out=buf453)
        buf454 = buf452; del buf452  # reuse
        # Topologically Sorted Source Nodes: [matmul_87], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf419, buf454, 16, grid=grid(16), stream=stream0)
        buf455 = reinterpret_tensor(buf450, (4, 4, 1), (4, 1, 1), 0); del buf450  # reuse
        # Topologically Sorted Source Nodes: [matmul_87], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf451, buf454, out=buf455)
        buf468 = buf454; del buf454  # reuse
        # Topologically Sorted Source Nodes: [matmul_90], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf418, buf468, 16, grid=grid(16), stream=stream0)
        buf469 = reinterpret_tensor(buf449, (4, 4, 1), (4, 1, 1), 0); del buf449  # reuse
        # Topologically Sorted Source Nodes: [matmul_90], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf464, buf468, out=buf469)
        buf470 = buf468; del buf468  # reuse
        # Topologically Sorted Source Nodes: [matmul_91], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf419, buf470, 16, grid=grid(16), stream=stream0)
        buf471 = reinterpret_tensor(buf434, (4, 4, 1), (4, 1, 1), 0); del buf434  # reuse
        # Topologically Sorted Source Nodes: [matmul_91], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf467, buf470, out=buf471)
        buf484 = buf470; del buf470  # reuse
        # Topologically Sorted Source Nodes: [matmul_94], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf418, buf484, 16, grid=grid(16), stream=stream0)
        buf485 = reinterpret_tensor(buf433, (4, 4, 1), (4, 1, 1), 0); del buf433  # reuse
        # Topologically Sorted Source Nodes: [matmul_94], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf480, buf484, out=buf485)
        buf490 = reinterpret_tensor(buf406, (4, 4, 4), (16, 4, 1), 0); del buf406  # reuse
        # Topologically Sorted Source Nodes: [v_update_23, add_16], Original ATen: [aten.cat, aten.add]
        triton_poi_fused_add_cat_28.run(buf490, buf437, buf453, buf469, buf485, primals_14, 64, grid=grid(64), stream=stream0)
        buf487 = reinterpret_tensor(buf485, (4, 4, 1), (4, 1, 16), 0); del buf485  # reuse
        # Topologically Sorted Source Nodes: [matmul_95], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf419, buf487, 16, grid=grid(16), stream=stream0)
        buf488 = buf469; del buf469  # reuse
        # Topologically Sorted Source Nodes: [matmul_95], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf483, buf487, out=buf488)
        buf492 = reinterpret_tensor(buf407, (4, 4, 4), (16, 4, 1), 0); del buf407  # reuse
        # Topologically Sorted Source Nodes: [q_update_23, add_17], Original ATen: [aten.cat, aten.add]
        triton_poi_fused_add_cat_28.run(buf492, buf439, buf455, buf471, buf488, primals_16, 64, grid=grid(64), stream=stream0)
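        # triton_poi_fused_add_cat_28 above fuses two graph nodes: the four
        # per-head update vectors are concatenated back to the full width and
        # a residual term is added in the same pass. A minimal eager-mode
        # sketch (illustrative only; relies on the `import torch` at the top
        # of this file):
        def _sketch_cat_heads_add(heads, residual):
            # heads: list of four (batch, seq, 1) tensors; residual broadcasts
            return torch.cat(heads, dim=-1) + residual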
        buf491 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [updated_v_5], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_26, reinterpret_tensor(buf490, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf491)
        buf493 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [updated_q_5], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_28, reinterpret_tensor(buf492, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf493)
        buf494 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf661 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_18], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf491, buf494, buf661, 64, grid=grid(64), stream=stream0)
        buf495 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf494, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf495)
        buf496 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf660 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_19], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf493, buf496, buf660, 64, grid=grid(64), stream=stream0)
        buf497 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf496, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 12), (1, 4), 0), out=buf497)
        buf498 = reinterpret_tensor(buf495, (4, 4, 12), (48, 12, 1), 0); del buf495  # reuse
        # Topologically Sorted Source Nodes: [v_trans_13], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf498, primals_10, primals_7, 192, grid=grid(192), stream=stream0)
        del primals_10
        buf499 = reinterpret_tensor(buf497, (4, 4, 12), (48, 12, 1), 0); del buf497  # reuse
        # Topologically Sorted Source Nodes: [q_trans_13], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf499, primals_12, primals_8, 192, grid=grid(192), stream=stream0)
        del primals_12
        buf500 = reinterpret_tensor(buf488, (4, 4, 1), (4, 1, 16), 0); del buf488  # reuse
        # Topologically Sorted Source Nodes: [matmul_96], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_2.run(buf498, buf500, 16, grid=grid(16), stream=stream0)
        buf501 = reinterpret_tensor(buf471, (4, 1, 4), (4, 16, 1), 0); del buf471  # reuse
        # Topologically Sorted Source Nodes: [matmul_96], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_3.run(buf499, buf501, 16, grid=grid(16), stream=stream0)
        buf502 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_96], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf500, buf501, out=buf502)
        buf503 = reinterpret_tensor(buf501, (4, 4, 1), (4, 1, 16), 0); del buf501  # reuse
        # Topologically Sorted Source Nodes: [matmul_97], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_2.run(buf499, buf503, 16, grid=grid(16), stream=stream0)
        buf504 = reinterpret_tensor(buf500, (4, 1, 4), (4, 16, 1), 0); del buf500  # reuse
        # Topologically Sorted Source Nodes: [matmul_97], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_3.run(buf498, buf504, 16, grid=grid(16), stream=stream0)
        buf505 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_97], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf503, buf504, out=buf505)
        buf516 = reinterpret_tensor(buf504, (4, 4, 1), (4, 1, 16), 0); del buf504  # reuse
        # Topologically Sorted Source Nodes: [matmul_100], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_4.run(buf498, buf516, 16, grid=grid(16), stream=stream0)
        buf517 = reinterpret_tensor(buf503, (4, 1, 4), (4, 16, 1), 0); del buf503  # reuse
        # Topologically Sorted Source Nodes: [matmul_100], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_5.run(buf499, buf517, 16, grid=grid(16), stream=stream0)
        buf518 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_100], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf516, buf517, out=buf518)
        buf532 = reinterpret_tensor(buf517, (4, 4, 1), (4, 1, 16), 0); del buf517  # reuse
        # Topologically Sorted Source Nodes: [matmul_104], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_6.run(buf498, buf532, 16, grid=grid(16), stream=stream0)
        buf533 = reinterpret_tensor(buf516, (4, 1, 4), (4, 16, 1), 0); del buf516  # reuse
        # Topologically Sorted Source Nodes: [matmul_104], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_7.run(buf499, buf533, 16, grid=grid(16), stream=stream0)
        buf534 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_104], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf532, buf533, out=buf534)
        buf548 = reinterpret_tensor(buf533, (4, 4, 1), (4, 1, 16), 0); del buf533  # reuse
        # Topologically Sorted Source Nodes: [matmul_108], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_8.run(buf498, buf548, 16, grid=grid(16), stream=stream0)
        buf549 = reinterpret_tensor(buf532, (4, 1, 4), (4, 16, 1), 0); del buf532  # reuse
        # Topologically Sorted Source Nodes: [matmul_108], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_9.run(buf499, buf549, 16, grid=grid(16), stream=stream0)
        buf550 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_108], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf548, buf549, out=buf550)
        buf506 = reinterpret_tensor(buf549, (4, 4, 1), (4, 1, 16), 0); del buf549  # reuse
        buf507 = buf548; del buf548  # reuse
        buf522 = reinterpret_tensor(buf455, (4, 4, 1), (4, 1, 16), 0); del buf455  # reuse
        buf523 = reinterpret_tensor(buf439, (4, 4, 1), (4, 1, 16), 0); del buf439  # reuse
        buf538 = buf487; del buf487  # reuse
        buf539 = reinterpret_tensor(buf453, (4, 4, 1), (4, 1, 16), 0); del buf453  # reuse
        buf554 = reinterpret_tensor(buf437, (4, 4, 1), (4, 1, 16), 0); del buf437  # reuse
        buf555 = buf484; del buf484  # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_48, interMAF_q2v_12, masked_fill_50, interMAF_q2v_13, masked_fill_52, interMAF_q2v_14, masked_fill_54, interMAF_q2v_15], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_8, buf502, buf518, buf534, buf550, buf506, buf507, buf522, buf523, buf538, buf539, buf554, buf555, 16, grid=grid(16), stream=stream0)
        buf508 = buf502; del buf502  # reuse
        buf524 = buf518; del buf518  # reuse
        buf540 = buf534; del buf534  # reuse
        buf556 = buf550; del buf550  # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_48, interMAF_q2v_12, masked_fill_50, interMAF_q2v_13, masked_fill_52, interMAF_q2v_14, masked_fill_54, interMAF_q2v_15], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf508, buf524, buf540, buf556, primals_8, buf506, buf507, buf522, buf523, buf538, buf539, buf554, buf555, 64, grid=grid(64), stream=stream0)
        buf519 = buf555; del buf555  # reuse
        # Topologically Sorted Source Nodes: [matmul_101], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_4.run(buf499, buf519, 16, grid=grid(16), stream=stream0)
        buf520 = reinterpret_tensor(buf554, (4, 1, 4), (4, 16, 1), 0); del buf554  # reuse
        # Topologically Sorted Source Nodes: [matmul_101], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_5.run(buf498, buf520, 16, grid=grid(16), stream=stream0)
        buf521 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_101], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf519, buf520, out=buf521)
        buf535 = reinterpret_tensor(buf520, (4, 4, 1), (4, 1, 16), 0); del buf520  # reuse
        # Topologically Sorted Source Nodes: [matmul_105], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_6.run(buf499, buf535, 16, grid=grid(16), stream=stream0)
        buf536 = reinterpret_tensor(buf519, (4, 1, 4), (4, 16, 1), 0); del buf519  # reuse
        # Topologically Sorted Source Nodes: [matmul_105], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_7.run(buf498, buf536, 16, grid=grid(16), stream=stream0)
        buf537 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_105], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf535, buf536, out=buf537)
        buf509 = reinterpret_tensor(buf536, (4, 4, 1), (4, 1, 16), 0); del buf536  # reuse
        buf510 = buf535; del buf535  # reuse
        buf525 = buf539; del buf539  # reuse
        buf526 = buf538; del buf538  # reuse
        buf541 = buf523; del buf523  # reuse
        buf542 = buf522; del buf522  # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_49, interMAF_v2q_12, masked_fill_51, interMAF_v2q_13, masked_fill_53, interMAF_v2q_14], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_12.run(primals_7, buf505, buf521, buf537, buf509, buf510, buf525, buf526, buf541, buf542, 16, grid=grid(16), stream=stream0)
        buf551 = buf507; del buf507  # reuse
        # Topologically Sorted Source Nodes: [matmul_109], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_8.run(buf499, buf551, 16, grid=grid(16), stream=stream0)
        buf552 = reinterpret_tensor(buf506, (4, 1, 4), (4, 16, 1), 0); del buf506  # reuse
        # Topologically Sorted Source Nodes: [matmul_109], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_9.run(buf498, buf552, 16, grid=grid(16), stream=stream0)
        buf553 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_109], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf551, buf552, out=buf553)
        buf512 = reinterpret_tensor(buf552, (4, 4, 1), (4, 1, 16), 0); del buf552  # reuse
        # Topologically Sorted Source Nodes: [v_update_24], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf499, buf512, 16, grid=grid(16), stream=stream0)
        buf513 = reinterpret_tensor(buf551, (4, 4, 1), (4, 1, 1), 0); del buf551  # reuse
        # Topologically Sorted Source Nodes: [v_update_24], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf508, buf512, out=buf513)
        buf528 = buf512; del buf512  # reuse
        # Topologically Sorted Source Nodes: [matmul_102], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf499, buf528, 16, grid=grid(16), stream=stream0)
        buf529 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_102], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf524, buf528, out=buf529)
        buf544 = buf528; del buf528  # reuse
        # Topologically Sorted Source Nodes: [matmul_106], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf499, buf544, 16, grid=grid(16), stream=stream0)
        buf545 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_106], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf540, buf544, out=buf545)
        buf560 = buf544; del buf544  # reuse
        # Topologically Sorted Source Nodes: [matmul_110], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf499, buf560, 16, grid=grid(16), stream=stream0)
        buf561 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_110], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf556, buf560, out=buf561)
        buf567 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        buf562 = reinterpret_tensor(buf567, (4, 4, 4), (32, 8, 1), 4)  # alias
        # Topologically Sorted Source Nodes: [v_update_27], Original ATen: [aten.cat]
        triton_poi_fused_cat_17.run(buf513, buf529, buf545, buf561, buf562, 64, grid=grid(64), stream=stream0)
        buf566 = reinterpret_tensor(buf567, (4, 4, 4), (32, 8, 1), 0)  # alias
        # Topologically Sorted Source Nodes: [cat_v_3], Original ATen: [aten.cat]
        triton_poi_fused_cat_18.run(buf491, buf566, 64, grid=grid(64), stream=stream0)
        buf570 = buf491; del buf491  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf567, (16, 8), (8, 1), 0), reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), out=buf570)
        buf557 = reinterpret_tensor(buf561, (4, 4, 1), (4, 1, 16), 0); del buf561  # reuse
        buf558 = reinterpret_tensor(buf545, (4, 4, 1), (4, 1, 16), 0); del buf545  # reuse
        buf572 = reinterpret_tensor(buf529, (4, 4), (4, 1), 0); del buf529  # reuse
        buf574 = buf572; del buf572  # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_55, interMAF_v2q_15, mul_32, sum_13, v_mean_3, relu_20], Original ATen: [aten.masked_fill, aten.eq, aten._softmax, aten.mul, aten.sum, aten.div, aten.relu]
        triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19.run(buf574, primals_7, buf553, buf570, primals_14, buf557, buf558, 16, grid=grid(16), stream=stream0)
        buf511 = buf505; del buf505  # reuse
        buf527 = buf521; del buf521  # reuse
        buf543 = buf537; del buf537  # reuse
        buf559 = buf553; del buf553  # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_49, interMAF_v2q_12, masked_fill_51, interMAF_v2q_13, masked_fill_53, interMAF_v2q_14, masked_fill_55, interMAF_v2q_15], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf511, buf527, buf543, buf559, primals_7, buf509, buf510, buf525, buf526, buf541, buf542, buf557, buf558, 64, grid=grid(64), stream=stream0)
        buf514 = buf558; del buf558  # reuse
        # Topologically Sorted Source Nodes: [q_update_24], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf498, buf514, 16, grid=grid(16), stream=stream0)
        buf515 = reinterpret_tensor(buf557, (4, 4, 1), (4, 1, 1), 0); del buf557  # reuse
        # Topologically Sorted Source Nodes: [q_update_24], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf511, buf514, out=buf515)
        buf530 = buf514; del buf514  # reuse
        # Topologically Sorted Source Nodes: [matmul_103], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf498, buf530, 16, grid=grid(16), stream=stream0)
        buf531 = reinterpret_tensor(buf542, (4, 4, 1), (4, 1, 1), 0); del buf542  # reuse
        # Topologically Sorted Source Nodes: [matmul_103], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf527, buf530, out=buf531)
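        # The `# alias` views used above are the inductor lowering of
        # torch.cat: instead of materializing the inputs and copying, the cat
        # kernels write each operand directly into a strided slice of a
        # preallocated (4, 4, 8) output. A minimal eager-mode sketch of a
        # concat written through slice views (illustrative only; relies on the
        # `import torch` at the top of this file):
        def _sketch_cat_via_alias(a, b):
            # a, b: (batch, seq, 4); result: (batch, seq, 8)
            out = torch.empty(a.shape[0], a.shape[1], 8, dtype=a.dtype, device=a.device)
            out[:, :, :4] = a  # corresponds to the offset-0 alias view
            out[:, :, 4:] = b  # corresponds to the offset-4 alias view
            return out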
        buf546 = buf530; del buf530  # reuse
        # Topologically Sorted Source Nodes: [matmul_107], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_15.run(buf498, buf546, 16, grid=grid(16), stream=stream0)
        buf547 = reinterpret_tensor(buf541, (4, 4, 1), (4, 1, 1), 0); del buf541  # reuse
        # Topologically Sorted Source Nodes: [matmul_107], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf543, buf546, out=buf547)
        buf563 = buf546; del buf546  # reuse
        # Topologically Sorted Source Nodes: [matmul_111], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_16.run(buf498, buf563, 16, grid=grid(16), stream=stream0)
        buf564 = reinterpret_tensor(buf526, (4, 4, 1), (4, 1, 1), 0); del buf526  # reuse
        # Topologically Sorted Source Nodes: [matmul_111], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf559, buf563, out=buf564)
        buf569 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        buf565 = reinterpret_tensor(buf569, (4, 4, 4), (32, 8, 1), 4)  # alias
        # Topologically Sorted Source Nodes: [q_update_27], Original ATen: [aten.cat]
        triton_poi_fused_cat_17.run(buf515, buf531, buf547, buf564, buf565, 64, grid=grid(64), stream=stream0)
        buf568 = reinterpret_tensor(buf569, (4, 4, 4), (32, 8, 1), 0)  # alias
        # Topologically Sorted Source Nodes: [cat_q_3], Original ATen: [aten.cat]
        triton_poi_fused_cat_18.run(buf493, buf568, 64, grid=grid(64), stream=stream0)
        buf571 = buf493; del buf493  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf569, (16, 8), (8, 1), 0), reinterpret_tensor(primals_15, (8, 4), (1, 8), 0), out=buf571)
        buf573 = reinterpret_tensor(buf564, (4, 4), (4, 1), 0); del buf564  # reuse
        buf576 = buf573; del buf573  # reuse
        # Topologically Sorted Source Nodes: [mul_33, sum_15, q_mean_3, relu_21], Original ATen: [aten.mul, aten.sum, aten.div, aten.relu]
        triton_poi_fused_div_mul_relu_sum_20.run(buf576, buf571, primals_16, primals_8, 16, grid=grid(16), stream=stream0)
        buf575 = reinterpret_tensor(buf547, (4, 4), (4, 1), 0); del buf547  # reuse
        # Topologically Sorted Source Nodes: [linear_36], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_18, buf574, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf575)
        del primals_18
        buf577 = reinterpret_tensor(buf531, (4, 4), (4, 1), 0); del buf531  # reuse
        # Topologically Sorted Source Nodes: [linear_37], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_20, buf576, reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf577)
        del primals_20
        buf578 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf659 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_22], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_21.run(buf570, primals_14, buf578, buf659, 64, grid=grid(64), stream=stream0)
        buf579 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf578, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 12), (1, 4), 0), out=buf579)
        buf580 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf658 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_23], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_21.run(buf571, primals_16, buf580, buf658, 64, grid=grid(64), stream=stream0)
        buf581 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf580, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf581)
        buf582 = reinterpret_tensor(buf579, (4, 4, 12), (48, 12, 1), 0); del buf579  # reuse
        # Topologically Sorted Source Nodes: [v_trans_15], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf582, primals_22, primals_7, 192, grid=grid(192), stream=stream0)
        del primals_22
        buf583 = reinterpret_tensor(buf581, (4, 4, 12), (48, 12, 1), 0); del buf581  # reuse
        # Topologically Sorted Source Nodes: [q_trans_15], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(buf583, primals_24, primals_8, 192, grid=grid(192), stream=stream0)
        del primals_24
        buf584 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf585 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add_18, new_vq_3, new_vk_3], Original ATen: [aten.add, aten.mul]
        triton_poi_fused_add_mul_22.run(buf577, buf582, buf584, buf585, 64, grid=grid(64), stream=stream0)
        buf586 = reinterpret_tensor(buf515, (4, 4, 1), (4, 1, 16), 0); del buf515  # reuse
        # Topologically Sorted Source Nodes: [matmul_112], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf584, buf586, 16, grid=grid(16), stream=stream0)
        buf587 = reinterpret_tensor(buf563, (4, 1, 4), (4, 16, 1), 0); del buf563  # reuse
        # Topologically Sorted Source Nodes: [matmul_112], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf585, buf587, 16, grid=grid(16), stream=stream0)
        buf588 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_112], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf586, buf587, out=buf588)
        buf589 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf590 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add_20, new_qq_3, new_qk_3], Original ATen: [aten.add, aten.mul]
        triton_poi_fused_add_mul_22.run(buf575, buf583, buf589, buf590, 64, grid=grid(64), stream=stream0)
        buf591 = reinterpret_tensor(buf587, (4, 4, 1), (4, 1, 16), 0); del buf587  # reuse
        # Topologically Sorted Source Nodes: [matmul_113], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf589, buf591, 16, grid=grid(16), stream=stream0)
        buf592 = reinterpret_tensor(buf586, (4, 1, 4), (4, 16, 1), 0); del buf586  # reuse
        # Topologically Sorted Source Nodes: [matmul_113], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_23.run(buf590, buf592, 16, grid=grid(16), stream=stream0)
        buf593 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_113], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf591, buf592, out=buf593)
        buf604 = reinterpret_tensor(buf592, (4, 4, 1), (4, 1, 16), 0); del buf592  # reuse
        # Topologically Sorted Source Nodes: [matmul_116], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf584, buf604, 16, grid=grid(16), stream=stream0)
        buf605 = reinterpret_tensor(buf591, (4, 1, 4), (4, 16, 1), 0); del buf591  # reuse
        # Topologically Sorted Source Nodes: [matmul_116], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf585, buf605, 16, grid=grid(16), stream=stream0)
        buf606 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_116], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf604, buf605, out=buf606)
        buf620 = reinterpret_tensor(buf605, (4, 4, 1), (4, 1, 16), 0); del buf605  # reuse
        # Topologically Sorted Source Nodes: [matmul_120], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf584, buf620, 16, grid=grid(16), stream=stream0)
        buf621 = reinterpret_tensor(buf604, (4, 1, 4), (4, 16, 1), 0); del buf604  # reuse
        # Topologically Sorted Source Nodes: [matmul_120], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf585, buf621, 16, grid=grid(16), stream=stream0)
        buf622 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_120], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf620, buf621, out=buf622)
        buf636 = reinterpret_tensor(buf621, (4, 4, 1), (4, 1, 16), 0); del buf621  # reuse
        # Topologically Sorted Source Nodes: [matmul_124], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf584, buf636, 16, grid=grid(16), stream=stream0)
        buf637 = reinterpret_tensor(buf620, (4, 1, 4), (4, 16, 1), 0); del buf620  # reuse
        # Topologically Sorted Source Nodes: [matmul_124], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf585, buf637, 16, grid=grid(16), stream=stream0)
        buf638 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_124], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf636, buf637, out=buf638)
        buf594 = reinterpret_tensor(buf637, (4, 4, 1), (4, 1, 16), 0); del buf637  # reuse
        buf595 = buf636; del buf636  # reuse
        buf610 = buf525; del buf525  # reuse
        buf611 = buf510; del buf510  # reuse
        buf626 = buf509; del buf509  # reuse
        buf627 = reinterpret_tensor(buf513, (4, 4, 1), (4, 1, 16), 0); del buf513  # reuse
        buf642 = buf560; del buf560  # reuse
        buf643 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_56, dyIntraMAF_v2v_12, masked_fill_58, dyIntraMAF_v2v_13, masked_fill_60, dyIntraMAF_v2v_14, masked_fill_62, dyIntraMAF_v2v_15], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_7, buf588, buf606, buf622, buf638, buf594, buf595, buf610, buf611, buf626, buf627, buf642, buf643, 16, grid=grid(16), stream=stream0)
        buf596 = buf588; del buf588  # reuse
        buf612 = buf606; del buf606  # reuse
        buf628 = buf622; del buf622  # reuse
        buf644 = buf638; del buf638  # reuse
        # Topologically Sorted Source Nodes: [masked_fill, eq_1, masked_fill_56, dyIntraMAF_v2v_12, masked_fill_58, dyIntraMAF_v2v_13, masked_fill_60, dyIntraMAF_v2v_14, masked_fill_62, dyIntraMAF_v2v_15], Original ATen: [aten.masked_fill, aten.eq, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf596, buf612, buf628, buf644, primals_7, buf594, buf595, buf610, buf611, buf626, buf627, buf642, buf643, 64, grid=grid(64), stream=stream0)
        buf607 = buf643; del buf643  # reuse
        # Topologically Sorted Source Nodes: [matmul_117], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf589, buf607, 16, grid=grid(16), stream=stream0)
        buf608 = reinterpret_tensor(buf642, (4, 1, 4), (4, 16, 1), 0); del buf642  # reuse
        # Topologically Sorted Source Nodes: [matmul_117], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_24.run(buf590, buf608, 16, grid=grid(16), stream=stream0)
        buf609 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_117], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf607, buf608, out=buf609)
        buf623 = reinterpret_tensor(buf608, (4, 4, 1), (4, 1, 16), 0); del buf608  # reuse
        # Topologically Sorted Source Nodes: [matmul_121], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf589, buf623, 16, grid=grid(16), stream=stream0)
        buf624 = reinterpret_tensor(buf607, (4, 1, 4), (4, 16, 1), 0); del buf607  # reuse
        # Topologically Sorted Source Nodes: [matmul_121], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_25.run(buf590, buf624, 16, grid=grid(16), stream=stream0)
        buf625 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_121], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf623, buf624, out=buf625)
        buf639 = reinterpret_tensor(buf624, (4, 4, 1), (4, 1, 16), 0); del buf624  # reuse
        # Topologically Sorted Source Nodes: [matmul_125], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf589, buf639, 16, grid=grid(16), stream=stream0)
        buf640 = reinterpret_tensor(buf623, (4, 1, 4), (4, 16, 1), 0); del buf623  # reuse
        # Topologically Sorted Source Nodes: [matmul_125], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_26.run(buf590, buf640, 16, grid=grid(16), stream=stream0)
        buf641 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_125], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf639, buf640, out=buf641)
        buf597 = reinterpret_tensor(buf640, (4, 4, 1), (4, 1, 16), 0); del buf640  # reuse
        buf598 = buf639; del buf639  # reuse
        buf613 = buf627; del buf627  # reuse
        buf614 = buf626; del buf626  # reuse
        buf629 = buf611; del buf611  # reuse
        buf630 = buf610; del buf610  # reuse
        buf645 = buf595; del buf595  # reuse
        buf646 = buf594; del buf594  # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_57, dyIntraMAF_q2q_12, masked_fill_59, dyIntraMAF_q2q_13, masked_fill_61, dyIntraMAF_q2q_14, masked_fill_63, dyIntraMAF_q2q_15], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_10.run(primals_8, buf593, buf609, buf625, buf641, buf597, buf598, buf613, buf614, buf629, buf630, buf645, buf646, 16, grid=grid(16), stream=stream0)
        buf599 = buf593; del buf593  # reuse
        buf615 = buf609; del buf609  # reuse
        buf631 = buf625; del buf625  # reuse
        buf647 = buf641; del buf641  # reuse
        # Topologically Sorted Source Nodes: [eq, masked_fill, masked_fill_57, dyIntraMAF_q2q_12, masked_fill_59, dyIntraMAF_q2q_13, masked_fill_61, dyIntraMAF_q2q_14, masked_fill_63, dyIntraMAF_q2q_15], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_eq_masked_fill_11.run(buf599, buf615, buf631, buf647, primals_8, buf597, buf598, buf613, buf614, buf629, buf630, buf645, buf646, 64, grid=grid(64), stream=stream0)
        buf600 = buf646; del buf646  # reuse
        # Topologically Sorted Source Nodes: [v_update_28], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf582, buf600, 16, grid=grid(16), stream=stream0)
        buf601 = reinterpret_tensor(buf645, (4, 4, 1), (4, 1, 1), 0); del buf645  # reuse
        # Topologically Sorted Source Nodes: [v_update_28], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf596, buf600, out=buf601)
        buf602 = buf600; del buf600  # reuse
        # Topologically Sorted Source Nodes: [q_update_28], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_13.run(buf583, buf602, 16, grid=grid(16), stream=stream0)
        buf603 = reinterpret_tensor(buf630, (4, 4, 1), (4, 1, 1), 0); del buf630  # reuse
        # Topologically Sorted Source Nodes: [q_update_28], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf599, buf602, out=buf603)
        buf616 = buf602; del buf602  # reuse
        # Topologically Sorted Source Nodes: [matmul_118], Original ATen: [aten.bmm]
        triton_poi_fused_bmm_14.run(buf582, buf616, 16, grid=grid(16), stream=stream0)
        buf617 = reinterpret_tensor(buf629, (4, 4, 1), (4, 1, 1), 0); del buf629  # reuse
        # Topologically Sorted Source Nodes: [matmul_118], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf612, buf616, out=buf617)
reinterpret_tensor(buf614, (4, 4, 1), (4, 1, 1), 0); del buf614 # reuse # Topologically Sorted Source Nodes: [matmul_119], Original ATen: [aten.bmm] extern_kernels.bmm(buf615, buf618, out=buf619) buf632 = buf618; del buf618 # reuse # Topologically Sorted Source Nodes: [matmul_122], Original ATen: [aten.bmm] triton_poi_fused_bmm_15.run(buf582, buf632, 16, grid=grid(16), stream=stream0) buf633 = reinterpret_tensor(buf613, (4, 4, 1), (4, 1, 1), 0); del buf613 # reuse # Topologically Sorted Source Nodes: [matmul_122], Original ATen: [aten.bmm] extern_kernels.bmm(buf628, buf632, out=buf633) buf634 = buf632; del buf632 # reuse # Topologically Sorted Source Nodes: [matmul_123], Original ATen: [aten.bmm] triton_poi_fused_bmm_15.run(buf583, buf634, 16, grid=grid(16), stream=stream0) buf635 = reinterpret_tensor(buf598, (4, 4, 1), (4, 1, 1), 0); del buf598 # reuse # Topologically Sorted Source Nodes: [matmul_123], Original ATen: [aten.bmm] extern_kernels.bmm(buf631, buf634, out=buf635) buf648 = buf634; del buf634 # reuse # Topologically Sorted Source Nodes: [matmul_126], Original ATen: [aten.bmm] triton_poi_fused_bmm_16.run(buf582, buf648, 16, grid=grid(16), stream=stream0) buf649 = reinterpret_tensor(buf597, (4, 4, 1), (4, 1, 1), 0); del buf597 # reuse # Topologically Sorted Source Nodes: [matmul_126], Original ATen: [aten.bmm] extern_kernels.bmm(buf644, buf648, out=buf649) del buf648 buf654 = reinterpret_tensor(buf570, (4, 4, 4), (16, 4, 1), 0); del buf570 # reuse # Topologically Sorted Source Nodes: [v_update_31, add_22], Original ATen: [aten.cat, aten.add] triton_poi_fused_add_cat_28.run(buf654, buf601, buf617, buf633, buf649, primals_14, 64, grid=grid(64), stream=stream0) del buf601 del buf617 del primals_14 buf651 = reinterpret_tensor(buf649, (4, 4, 1), (4, 1, 16), 0); del buf649 # reuse # Topologically Sorted Source Nodes: [matmul_127], Original ATen: [aten.bmm] triton_poi_fused_bmm_16.run(buf583, buf651, 16, grid=grid(16), stream=stream0) buf652 = buf633; del buf633 # reuse # Topologically Sorted Source Nodes: [matmul_127], Original ATen: [aten.bmm] extern_kernels.bmm(buf647, buf651, out=buf652) del buf651 buf656 = reinterpret_tensor(buf571, (4, 4, 4), (16, 4, 1), 0); del buf571 # reuse # Topologically Sorted Source Nodes: [q_update_31, add_23], Original ATen: [aten.cat, aten.add] triton_poi_fused_add_cat_28.run(buf656, buf603, buf619, buf635, buf652, primals_16, 64, grid=grid(64), stream=stream0) del buf603 del buf619 del buf635 del buf652 del primals_16 buf655 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [updated_v_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_26, reinterpret_tensor(buf654, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf655) del primals_26 buf657 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [updated_q_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_28, reinterpret_tensor(buf656, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf657) del primals_28 return (reinterpret_tensor(buf655, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf657, (4, 4, 4), (16, 4, 1), 0), primals_7, primals_8, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (16, 4), (4, 1), 0), buf16, buf19, buf32, buf35, buf48, buf51, buf64, buf67, reinterpret_tensor(buf75, (16, 8), 
(8, 1), 0), reinterpret_tensor(buf77, (16, 8), (8, 1), 0), buf82, buf83, buf84, buf85, reinterpret_tensor(buf86, (16, 4), (4, 1), 0), reinterpret_tensor(buf88, (16, 4), (4, 1), 0), reinterpret_tensor(buf90, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf90, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf91, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf91, (4, 4, 4), (48, 12, 1), 4), buf104, buf107, buf120, buf123, buf136, buf139, buf152, buf155, reinterpret_tensor(buf162, (16, 4), (4, 1), 0), reinterpret_tensor(buf164, (16, 4), (4, 1), 0), reinterpret_tensor(buf166, (16, 4), (4, 1), 0), reinterpret_tensor(buf168, (16, 4), (4, 1), 0), buf180, buf183, buf196, buf199, buf212, buf215, buf228, buf231, reinterpret_tensor(buf239, (16, 8), (8, 1), 0), reinterpret_tensor(buf241, (16, 8), (8, 1), 0), buf246, buf247, buf248, buf249, reinterpret_tensor(buf250, (16, 4), (4, 1), 0), reinterpret_tensor(buf252, (16, 4), (4, 1), 0), reinterpret_tensor(buf254, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf254, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf255, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf255, (4, 4, 4), (48, 12, 1), 4), buf268, buf271, buf284, buf287, buf300, buf303, buf316, buf319, reinterpret_tensor(buf326, (16, 4), (4, 1), 0), reinterpret_tensor(buf328, (16, 4), (4, 1), 0), reinterpret_tensor(buf330, (16, 4), (4, 1), 0), reinterpret_tensor(buf332, (16, 4), (4, 1), 0), buf344, buf347, buf360, buf363, buf376, buf379, buf392, buf395, reinterpret_tensor(buf403, (16, 8), (8, 1), 0), reinterpret_tensor(buf405, (16, 8), (8, 1), 0), buf410, buf411, buf412, buf413, reinterpret_tensor(buf414, (16, 4), (4, 1), 0), reinterpret_tensor(buf416, (16, 4), (4, 1), 0), reinterpret_tensor(buf418, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf418, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf419, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf419, (4, 4, 4), (48, 12, 1), 4), buf432, buf435, buf448, buf451, buf464, buf467, buf480, buf483, reinterpret_tensor(buf490, (16, 4), (4, 1), 0), reinterpret_tensor(buf492, (16, 4), (4, 1), 0), reinterpret_tensor(buf494, (16, 4), (4, 1), 0), reinterpret_tensor(buf496, (16, 4), (4, 1), 0), buf508, buf511, buf524, buf527, buf540, buf543, buf556, buf559, reinterpret_tensor(buf567, (16, 8), (8, 1), 0), reinterpret_tensor(buf569, (16, 8), (8, 1), 0), buf574, buf575, buf576, buf577, reinterpret_tensor(buf578, (16, 4), (4, 1), 0), reinterpret_tensor(buf580, (16, 4), (4, 1), 0), reinterpret_tensor(buf582, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf582, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf583, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf583, (4, 4, 4), (48, 12, 1), 4), buf596, buf599, buf612, buf615, buf628, buf631, buf644, buf647, reinterpret_tensor(buf654, (16, 4), (4, 1), 0), reinterpret_tensor(buf656, (16, 4), (4, 1), 0), primals_27, primals_25, reinterpret_tensor(buf583, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf582, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf589, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf590, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf584, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf585, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf583, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf582, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf589, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf590, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf584, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf585, (4, 4, 1), (16, 4, 1), 2), 
reinterpret_tensor(buf583, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf582, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf589, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf590, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf584, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf585, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf583, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf582, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf589, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf590, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf584, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf585, (4, 4, 1), (16, 4, 1), 0), primals_23, buf658, primals_21, buf659, primals_19, primals_17, primals_15, primals_13, reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf498, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf499, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf498, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf499, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf498, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf499, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf498, (4, 4, 1), (48, 12, 1), 0), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf499, (4, 4, 1), (48, 12, 1), 0), primals_11, buf660, primals_9, buf661, reinterpret_tensor(buf419, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf418, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf425, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf426, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf420, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf421, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf419, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf418, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf425, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf426, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf420, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf421, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf419, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf418, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf425, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf426, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf420, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf421, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf419, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf418, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf425, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf426, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf420, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf421, (4, 4, 1), (16, 4, 1), 0), buf662, buf663, reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 11), 
reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf334, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf335, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf334, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf335, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf334, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf335, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf334, (4, 4, 1), (48, 12, 1), 0), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf335, (4, 4, 1), (48, 12, 1), 0), buf664, buf665, reinterpret_tensor(buf255, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf254, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf262, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf256, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf257, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf255, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf254, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf262, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf256, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf257, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf255, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf254, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf262, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf256, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf257, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf255, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf254, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf262, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf256, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf257, (4, 4, 1), (16, 4, 1), 0), buf666, buf667, reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf170, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf171, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf170, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf171, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf170, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf171, (4, 4, 1), (48, 12, 1), 1), 
reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf170, (4, 4, 1), (48, 12, 1), 0), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf171, (4, 4, 1), (48, 12, 1), 0), buf668, buf669, reinterpret_tensor(buf91, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf90, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf97, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf98, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf92, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf93, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf91, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf90, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf97, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf98, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf92, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf93, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf91, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf90, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf97, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf98, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf92, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf93, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf91, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf90, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf97, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf98, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf92, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf93, (4, 4, 1), (16, 4, 1), 0), buf670, buf671, reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf6, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf7, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf6, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf7, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf6, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf7, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf6, (4, 4, 1), (48, 12, 1), 0), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf7, (4, 4, 1), (48, 12, 1), 0), buf672, buf673, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_12 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_13 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
    primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_15 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
    primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_19 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_21 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_22 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_23 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_24 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_25 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_26 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_27 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_28 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
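# Hedged sketch (not part of the generated file): a quick shape smoke test for
# the compiled entry point above. It assumes a CUDA build of PyTorch and reuses
# the same (shape, stride) pairs that benchmark_compiled_module feeds to call();
# the helper name `_smoke_test_call` is illustrative only.
def _smoke_test_call():
    from torch._dynamo.testing import rand_strided
    specs = [((4, 4, 4), (16, 4, 1)), ((4, 4), (4, 1)), ((4,), (1,)),
             ((4, 4, 4), (16, 4, 1)), ((4, 4), (4, 1)), ((4,), (1,)),
             ((4, 4), (4, 1)), ((4, 4), (4, 1)),
             ((12, 4), (4, 1)), ((12,), (1,)), ((12, 4), (4, 1)), ((12,), (1,)),
             ((4, 8), (8, 1)), ((4,), (1,)), ((4, 8), (8, 1)), ((4,), (1,)),
             ((4, 4), (4, 1)), ((4,), (1,)), ((4, 4), (4, 1)), ((4,), (1,)),
             ((12, 4), (4, 1)), ((12,), (1,)), ((12, 4), (4, 1)), ((12,), (1,)),
             ((4, 4), (4, 1)), ((4,), (1,)), ((4, 4), (4, 1)), ((4,), (1,))]
    args = [rand_strided(shape, stride, device='cuda:0', dtype=torch.float32)
            for shape, stride in specs]
    # The first two outputs are the updated visual/question features.
    updated_v, updated_q = call(args)[:2]
    assert updated_v.shape == (4, 4, 4) and updated_q.shape == (4, 4, 4)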
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class DyIntraModalityUpdate(nn.Module):
    """
    Dynamic Intra-modality Attention Flow
    """

    def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
        super(DyIntraModalityUpdate, self).__init__()
        self.v_size = v_size
        self.q_size = q_size
        self.output_size = output_size
        self.num_head = num_head
        self.v4q_gate_lin = nn.Linear(v_size, output_size)
        self.q4v_gate_lin = nn.Linear(q_size, output_size)
        self.v_lin = nn.Linear(v_size, output_size * 3)
        self.q_lin = nn.Linear(q_size, output_size * 3)
        self.v_output = nn.Linear(output_size, output_size)
        self.q_output = nn.Linear(output_size, output_size)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.drop = nn.Dropout(drop)

    def forward(self, v, q, v_mask, q_mask):
        """
        v: visual feature      [batch, num_obj, feat_size]
        q: question            [batch, max_len, feat_size]
        v_mask                 [batch, num_obj]
        q_mask                 [batch, max_len]
        """
        batch_size, num_obj = v_mask.shape
        _, max_len = q_mask.shape
        v_mean = (v * v_mask.unsqueeze(2)).sum(1) / v_mask.sum(1).unsqueeze(1)
        q_mean = (q * q_mask.unsqueeze(2)).sum(1) / q_mask.sum(1).unsqueeze(1)
        v4q_gate = self.sigmoid(self.v4q_gate_lin(self.drop(self.relu(v_mean)))).unsqueeze(1)
        q4v_gate = self.sigmoid(self.q4v_gate_lin(self.drop(self.relu(q_mean)))).unsqueeze(1)
        v_trans = self.v_lin(self.drop(self.relu(v)))
        q_trans = self.q_lin(self.drop(self.relu(q)))
        v_trans = v_trans * v_mask.unsqueeze(2)
        q_trans = q_trans * q_mask.unsqueeze(2)
        v_k, v_q, v_v = torch.split(v_trans, v_trans.size(2) // 3, dim=2)
        q_k, q_q, q_v = torch.split(q_trans, q_trans.size(2) // 3, dim=2)
        new_vq = (1 + q4v_gate) * v_q
        new_vk = (1 + q4v_gate) * v_k
        new_qq = (1 + v4q_gate) * q_q
        new_qk = (1 + v4q_gate) * q_k
        vk_set = torch.split(new_vk, new_vk.size(2) // self.num_head, dim=2)
        vq_set = torch.split(new_vq, new_vq.size(2) // self.num_head, dim=2)
        vv_set = torch.split(v_v, v_v.size(2) // self.num_head, dim=2)
        qk_set = torch.split(new_qk, new_qk.size(2) // self.num_head, dim=2)
        qq_set = torch.split(new_qq, new_qq.size(2) // self.num_head, dim=2)
        qv_set = torch.split(q_v, q_v.size(2) // self.num_head, dim=2)
        for i in range(self.num_head):
            vk_slice, vq_slice, vv_slice = vk_set[i], vq_set[i], vv_set[i]
            qk_slice, qq_slice, qv_slice = qk_set[i], qq_set[i], qv_set[i]
            v2v = (vq_slice @ vk_slice.transpose(1, 2)).masked_fill(
                v_mask.unsqueeze(1).expand([batch_size, num_obj, num_obj]) == 0,
                -1000000000.0) / (self.output_size // self.num_head) ** 0.5
            q2q = (qq_slice @ qk_slice.transpose(1, 2)).masked_fill(
                q_mask.unsqueeze(1).expand([batch_size, max_len, max_len]) == 0,
                -1000000000.0) / (self.output_size // self.num_head) ** 0.5
            dyIntraMAF_v2v = F.softmax(v2v, dim=2)
            dyIntraMAF_q2q = F.softmax(q2q, dim=2)
            v_update = dyIntraMAF_v2v @ vv_slice if i == 0 else torch.cat(
                (v_update, dyIntraMAF_v2v @ vv_slice), dim=2)
            q_update = dyIntraMAF_q2q @ qv_slice if i == 0 else torch.cat(
                (q_update, dyIntraMAF_q2q @ qv_slice), dim=2)
        updated_v = self.v_output(self.drop(v + v_update))
        updated_q = self.q_output(self.drop(q + q_update))
        return updated_v, updated_q


class InterModalityUpdate(nn.Module):
    """
    Inter-modality Attention Flow
    """

    def __init__(self, v_size, q_size, output_size, num_head, drop=0.0):
        super(InterModalityUpdate, self).__init__()
        self.v_size = v_size
        self.q_size = q_size
        self.output_size = output_size
        self.num_head = num_head
        self.v_lin = nn.Linear(v_size, output_size * 3)
        self.q_lin = nn.Linear(q_size, output_size * 3)
        self.v_output = nn.Linear(output_size + v_size, output_size)
        self.q_output = nn.Linear(output_size + q_size, output_size)
        self.relu = nn.ReLU()
        self.drop = nn.Dropout(drop)

    def forward(self, v, q, v_mask, q_mask):
        """
        v: visual feature      [batch, num_obj, feat_size]
        q: question            [batch, max_len, feat_size]
        v_mask                 [batch, num_obj]
        q_mask                 [batch, max_len]
        """
        batch_size, num_obj = v_mask.shape
        _, max_len = q_mask.shape
        v_trans = self.v_lin(self.drop(self.relu(v)))
        q_trans = self.q_lin(self.drop(self.relu(q)))
        v_trans = v_trans * v_mask.unsqueeze(2)
        q_trans = q_trans * q_mask.unsqueeze(2)
        v_k, v_q, v_v = torch.split(v_trans, v_trans.size(2) // 3, dim=2)
        q_k, q_q, q_v = torch.split(q_trans, q_trans.size(2) // 3, dim=2)
        vk_set = torch.split(v_k, v_k.size(2) // self.num_head, dim=2)
        vq_set = torch.split(v_q, v_q.size(2) // self.num_head, dim=2)
        vv_set = torch.split(v_v, v_v.size(2) // self.num_head, dim=2)
        qk_set = torch.split(q_k, q_k.size(2) // self.num_head, dim=2)
        qq_set = torch.split(q_q, q_q.size(2) // self.num_head, dim=2)
        qv_set = torch.split(q_v, q_v.size(2) // self.num_head, dim=2)
        for i in range(self.num_head):
            vk_slice, vq_slice, vv_slice = vk_set[i], vq_set[i], vv_set[i]
            qk_slice, qq_slice, qv_slice = qk_set[i], qq_set[i], qv_set[i]
            q2v = (vq_slice @ qk_slice.transpose(1, 2)).masked_fill(
                q_mask.unsqueeze(1).expand([batch_size, num_obj, max_len]) == 0,
                -1000000000.0) / (self.output_size // self.num_head) ** 0.5
            v2q = (qq_slice @ vk_slice.transpose(1, 2)).masked_fill(
                v_mask.unsqueeze(1).expand([batch_size, max_len, num_obj]) == 0,
                -1000000000.0) / (self.output_size // self.num_head) ** 0.5
            interMAF_q2v = F.softmax(q2v, dim=2)
            interMAF_v2q = F.softmax(v2q, dim=2)
            v_update = interMAF_q2v @ qv_slice if i == 0 else torch.cat(
                (v_update, interMAF_q2v @ qv_slice), dim=2)
            q_update = interMAF_v2q @ vv_slice if i == 0 else torch.cat(
                (q_update, interMAF_v2q @ vv_slice), dim=2)
        cat_v = torch.cat((v, v_update), dim=2)
        cat_q = torch.cat((q, q_update), dim=2)
        updated_v = self.v_output(self.drop(cat_v))
        updated_q = self.q_output(self.drop(cat_q))
        return updated_v, updated_q


class SingleBlock(nn.Module):
    """
    Single Block Inter-/Intra-modality stack multiple times
    """

    def __init__(self, num_block, v_size, q_size, output_size,
                 num_inter_head, num_intra_head, drop=0.0):
        super(SingleBlock, self).__init__()
        self.v_size = v_size
        self.q_size = q_size
        self.output_size = output_size
        self.num_inter_head = num_inter_head
        self.num_intra_head = num_intra_head
        self.num_block = num_block
        self.v_lin = nn.Linear(v_size, output_size)
        self.q_lin = nn.Linear(q_size, output_size)
        self.interBlock = InterModalityUpdate(output_size, output_size,
            output_size, num_inter_head, drop)
        self.intraBlock = DyIntraModalityUpdate(output_size, output_size,
            output_size, num_intra_head, drop)
        self.drop = nn.Dropout(drop)

    def forward(self, v, q, v_mask, q_mask):
        """
        v: visual feature      [batch, num_obj, feat_size]
        q: question            [batch, max_len, feat_size]
        v_mask                 [batch, num_obj]
        q_mask                 [batch, max_len]
        """
        v = self.v_lin(self.drop(v))
        q = self.q_lin(self.drop(q))
        for i in range(self.num_block):
            v, q = self.interBlock(v, q, v_mask, q_mask)
            v, q = self.intraBlock(v, q, v_mask, q_mask)
        return v, q


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4]),
        torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'num_block': 4, 'v_size': 4, 'q_size': 4, 'output_size': 4,
        'num_inter_head': 4, 'num_intra_head': 4}]
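# Hedged usage sketch (not part of the original source): how the eager modules
# above fit together. `get_init_inputs()[1]` supplies the constructor kwargs and
# `get_inputs()` the (v, q, v_mask, q_mask) tensors; the helper name
# `_example_usage` is illustrative only. Note the random masks here are almost
# surely nonzero, so the masked_fill(-1e9) branch rarely fires in this demo.
def _example_usage():
    v, q, v_mask, q_mask = get_inputs()
    block = SingleBlock(**get_init_inputs()[1])
    updated_v, updated_q = block(v, q, v_mask, q_mask)
    # Both outputs keep the [batch, seq, output_size] = (4, 4, 4) layout.
    return updated_v.shape, updated_q.shape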
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = 0.0
    tmp4 = tmp2 <= tmp3
    tl.store(out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr1 + x0, tmp4, xmask)


@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 12
    x1 = xindex // 12
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_bmm_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 12 * x0, xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (5 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (1 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (6 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (2 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (7 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (3 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_10(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4,
    out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp41 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp52 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp71 = tl.load(in_ptr3 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp74 = tl.load(in_ptr3 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp78 = tl.load(in_ptr3 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp82 = tl.load(in_ptr3 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp101 = tl.load(in_ptr4 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp104 = tl.load(in_ptr4 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp108 = tl.load(in_ptr4 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp112 = tl.load(in_ptr4 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tmp4 = -1000000000.0
    tmp5 = tl.where(tmp2, tmp4, tmp3)
    tmp6 = 1.0
    tmp7 = tmp5 * tmp6
    tmp9 = tmp8 == tmp1
    tmp11 = tl.where(tmp9, tmp4, tmp10)
    tmp12 = tmp11 * tmp6
    tmp13 = triton_helpers.maximum(tmp7, tmp12)
    tmp15 = tmp14 == tmp1
    tmp17 = tl.where(tmp15, tmp4, tmp16)
    tmp18 = tmp17 * tmp6
    tmp19 = triton_helpers.maximum(tmp13, tmp18)
    tmp21 = tmp20 == tmp1
    tmp23 = tl.where(tmp21, tmp4, tmp22)
    tmp24 = tmp23 * tmp6
    tmp25 = triton_helpers.maximum(tmp19, tmp24)
    tmp26 = tmp7 - tmp25
    tmp27 = tmp26 * tmp6
    tmp28 = tl_math.exp(tmp27)
    tmp29 = tmp12 - tmp25
    tmp30 = tmp29 * tmp6
    tmp31 = tl_math.exp(tmp30)
    tmp32 = tmp28 + tmp31
    tmp33 = tmp18 - tmp25
    tmp34 = tmp33 * tmp6
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp32 + tmp35
    tmp37 = tmp24 - tmp25
    tmp38 = tmp37 * tmp6
    tmp39 = tl_math.exp(tmp38)
    tmp40 = tmp36 + tmp39
    tmp42 = tl.where(tmp2, tmp4, tmp41)
    tmp43 = tmp42 * tmp6
    tmp45 = tl.where(tmp9, tmp4, tmp44)
    tmp46 = tmp45 * tmp6
    tmp47 = triton_helpers.maximum(tmp43, tmp46)
    tmp49 = tl.where(tmp15, tmp4, tmp48)
    tmp50 = tmp49 * tmp6
    tmp51 = triton_helpers.maximum(tmp47, tmp50)
    tmp53 = tl.where(tmp21, tmp4, tmp52)
    tmp54 = tmp53 * tmp6
    tmp55 = triton_helpers.maximum(tmp51, tmp54)
    tmp56 = tmp43 - tmp55
    tmp57 = tmp56 * tmp6
    tmp58 = tl_math.exp(tmp57)
    tmp59 = tmp46 - tmp55
    tmp60 = tmp59 * tmp6
    tmp61 = tl_math.exp(tmp60)
    tmp62 = tmp58 + tmp61
    tmp63 = tmp50 - tmp55
    tmp64 = tmp63 * tmp6
    tmp65 = tl_math.exp(tmp64)
    tmp66 = tmp62 + tmp65
    tmp67 = tmp54 - tmp55
    tmp68 = tmp67 * tmp6
    tmp69 = tl_math.exp(tmp68)
    tmp70 = tmp66 + tmp69
    tmp72 = tl.where(tmp2, tmp4, tmp71)
    tmp73 = tmp72 * tmp6
    tmp75 = tl.where(tmp9, tmp4, tmp74)
    tmp76 = tmp75 * tmp6
    tmp77 = triton_helpers.maximum(tmp73, tmp76)
    tmp79 = tl.where(tmp15, tmp4, tmp78)
    tmp80 = tmp79 * tmp6
    tmp81 = triton_helpers.maximum(tmp77, tmp80)
    tmp83 = tl.where(tmp21, tmp4, tmp82)
    tmp84 = tmp83 * tmp6
    tmp85 = triton_helpers.maximum(tmp81, tmp84)
    tmp86 = tmp73 - tmp85
    tmp87 = tmp86 * tmp6
    tmp88 = tl_math.exp(tmp87)
    tmp89 = tmp76 - tmp85
    tmp90 = tmp89 * tmp6
    tmp91 = tl_math.exp(tmp90)
    tmp92 = tmp88 + tmp91
    tmp93 = tmp80 - tmp85
    tmp94 = tmp93 * tmp6
    tmp95 = tl_math.exp(tmp94)
    tmp96 = tmp92 + tmp95
    tmp97 = tmp84 - tmp85
    tmp98 = tmp97 * tmp6
    tmp99 = tl_math.exp(tmp98)
    tmp100 = tmp96 + tmp99
    tmp102 = tl.where(tmp2, tmp4, tmp101)
    tmp103 = tmp102 * tmp6
    tmp105 = tl.where(tmp9, tmp4, tmp104)
    tmp106 = tmp105 * tmp6
    tmp107 = triton_helpers.maximum(tmp103, tmp106)
    tmp109 = tl.where(tmp15, tmp4, tmp108)
    tmp110 = tmp109 * tmp6
    tmp111 = triton_helpers.maximum(tmp107, tmp110)
    tmp113 = tl.where(tmp21, tmp4, tmp112)
    tmp114 = tmp113 * tmp6
    tmp115 = triton_helpers.maximum(tmp111, tmp114)
    tmp116 = tmp103 - tmp115
    tmp117 = tmp116 * tmp6
    tmp118 = tl_math.exp(tmp117)
    tmp119 = tmp106 - tmp115
    tmp120 = tmp119 * tmp6
    tmp121 = tl_math.exp(tmp120)
    tmp122 = tmp118 + tmp121
    tmp123 = tmp110 - tmp115
    tmp124 = tmp123 * tmp6
    tmp125 = tl_math.exp(tmp124)
    tmp126 = tmp122 + tmp125
    tmp127 = tmp114 - tmp115
    tmp128 = tmp127 * tmp6
    tmp129 = tl_math.exp(tmp128)
    tmp130 = tmp126 + tmp129
    tl.store(out_ptr0 + x2, tmp25, xmask)
    tl.store(out_ptr1 + x2, tmp40, xmask)
    tl.store(out_ptr2 + x2, tmp55, xmask)
    tl.store(out_ptr3 + x2, tmp70, xmask)
    tl.store(out_ptr4 + x2, tmp85, xmask)
    tl.store(out_ptr5 + x2, tmp100, xmask)
    tl.store(out_ptr6 + x2, tmp115, xmask)
    tl.store(out_ptr7 + x2, tmp130, xmask)


@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_11(in_out_ptr0, in_out_ptr1,
    in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex
    x4 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_out_ptr0 + x3, xmask)
    tmp8 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_out_ptr1 + x3, xmask)
    tmp17 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_out_ptr2 + x3, xmask)
    tmp26 = tl.load(in_ptr5 + x4, xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr6 + x4, xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_out_ptr3 + x3, xmask)
    tmp35 = tl.load(in_ptr7 + x4, xmask, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr8 + x4, xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tmp4 = -1000000000.0
    tmp5 = tl.where(tmp2, tmp4, tmp3)
    tmp6 = 1.0
    tmp7 = tmp5 * tmp6
    tmp9 = tmp7 - tmp8
    tmp10 = tmp9 * tmp6
    tmp11 = tl_math.exp(tmp10)
    tmp13 = tmp11 / tmp12
    tmp15 = tl.where(tmp2, tmp4, tmp14)
    tmp16 = tmp15 * tmp6
    tmp18 = tmp16 - tmp17
    tmp19 = tmp18 * tmp6
    tmp20 = tl_math.exp(tmp19)
    tmp22 = tmp20 / tmp21
    tmp24 = tl.where(tmp2, tmp4, tmp23)
    tmp25 = tmp24 * tmp6
    tmp27 = tmp25 - tmp26
    tmp28 = tmp27 * tmp6
    tmp29 = tl_math.exp(tmp28)
    tmp31 = tmp29 / tmp30
    tmp33 = tl.where(tmp2, tmp4, tmp32)
    tmp34 = tmp33 * tmp6
    tmp36 = tmp34 - tmp35
    tmp37 = tmp36 * tmp6
    tmp38 = tl_math.exp(tmp37)
    tmp40 = tmp38 / tmp39
    tl.store(in_out_ptr0 + x3, tmp13, xmask)
    tl.store(in_out_ptr1 + x3, tmp22, xmask)
    tl.store(in_out_ptr2 + x3, tmp31, xmask)
    tl.store(in_out_ptr3 + x3, tmp40, xmask)


@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_12(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp41 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp52 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp71 = tl.load(in_ptr3 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp74 = tl.load(in_ptr3 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp78 = tl.load(in_ptr3 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp82 = tl.load(in_ptr3 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tmp4 = -1000000000.0
    tmp5 = tl.where(tmp2, tmp4, tmp3)
    tmp6 = 1.0
    tmp7 = tmp5 * tmp6
    tmp9 = tmp8 == tmp1
    tmp11 = tl.where(tmp9, tmp4, tmp10)
    tmp12 = tmp11 * tmp6
    tmp13 = triton_helpers.maximum(tmp7, tmp12)
    tmp15 = tmp14 == tmp1
    tmp17 = tl.where(tmp15, tmp4, tmp16)
    tmp18 = tmp17 * tmp6
    tmp19 = triton_helpers.maximum(tmp13, tmp18)
    tmp21 = tmp20 == tmp1
    tmp23 = tl.where(tmp21, tmp4, tmp22)
    tmp24 = tmp23 * tmp6
    tmp25 = triton_helpers.maximum(tmp19, tmp24)
    tmp26 = tmp7 - tmp25
    tmp27 = tmp26 * tmp6
    tmp28 = tl_math.exp(tmp27)
    tmp29 = tmp12 - tmp25
    tmp30 = tmp29 * tmp6
    tmp31 = tl_math.exp(tmp30)
    tmp32 = tmp28 + tmp31
    tmp33 = tmp18 - tmp25
    tmp34 = tmp33 * tmp6
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp32 + tmp35
    tmp37 = tmp24 - tmp25
    tmp38 = tmp37 * tmp6
    tmp39 = tl_math.exp(tmp38)
    tmp40 = tmp36 + tmp39
    tmp42 = tl.where(tmp2, tmp4, tmp41)
    tmp43 = tmp42 * tmp6
    tmp45 = tl.where(tmp9, tmp4, tmp44)
    tmp46 = tmp45 * tmp6
    tmp47 = triton_helpers.maximum(tmp43, tmp46)
    tmp49 = tl.where(tmp15, tmp4, tmp48)
    tmp50 = tmp49 * tmp6
    tmp51 = triton_helpers.maximum(tmp47, tmp50)
    tmp53 = tl.where(tmp21, tmp4, tmp52)
    tmp54 = tmp53 * tmp6
    tmp55 = triton_helpers.maximum(tmp51, tmp54)
    tmp56 = tmp43 - tmp55
    tmp57 = tmp56 * tmp6
    tmp58 = tl_math.exp(tmp57)
    tmp59 = tmp46 - tmp55
    tmp60 = tmp59 * tmp6
    tmp61 = tl_math.exp(tmp60)
    tmp62 = tmp58 + tmp61
    tmp63 = tmp50 - tmp55
    tmp64 = tmp63 * tmp6
    tmp65 = tl_math.exp(tmp64)
    tmp66 = tmp62 + tmp65
    tmp67 = tmp54 - tmp55
    tmp68 = tmp67 * tmp6
    tmp69 = tl_math.exp(tmp68)
    tmp70 = tmp66 + tmp69
    tmp72 = tl.where(tmp2, tmp4, tmp71)
    tmp73 = tmp72 * tmp6
    tmp75 = tl.where(tmp9, tmp4, tmp74)
    tmp76 = tmp75 * tmp6
    tmp77 = triton_helpers.maximum(tmp73, tmp76)
    tmp79 = tl.where(tmp15, tmp4, tmp78)
    tmp80 = tmp79 * tmp6
    tmp81 = triton_helpers.maximum(tmp77, tmp80)
    tmp83 = tl.where(tmp21, tmp4, tmp82)
    tmp84 = tmp83 * tmp6
    tmp85 = triton_helpers.maximum(tmp81, tmp84)
    tmp86 = tmp73 - tmp85
    tmp87 = tmp86 * tmp6
    tmp88 = tl_math.exp(tmp87)
    tmp89 = tmp76 - tmp85
    tmp90 = tmp89 * tmp6
    tmp91 = tl_math.exp(tmp90)
    tmp92 = tmp88 + tmp91
    tmp93 = tmp80 - tmp85
    tmp94 = tmp93 * tmp6
    tmp95 = tl_math.exp(tmp94)
    tmp96 = tmp92 + tmp95
    tmp97 = tmp84 - tmp85
    tmp98 = tmp97 * tmp6
    tmp99 = tl_math.exp(tmp98)
    tmp100 = tmp96 + tmp99
    tl.store(out_ptr0 + x2, tmp25, xmask)
    tl.store(out_ptr1 + x2, tmp40, xmask)
    tl.store(out_ptr2 + x2, tmp55, xmask)
    tl.store(out_ptr3 + x2, tmp70, xmask)
    tl.store(out_ptr4 + x2, tmp85, xmask)
    tl.store(out_ptr5 + x2, tmp100, xmask)


@triton.jit
def triton_poi_fused_bmm_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (8 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (9 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_15(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (10 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_16(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (11 + 12 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 3, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 2, tl.int64)
    tmp6 = tmp0 < tmp5
    tmp7 = tmp6 & tmp4
    tmp8 = tl.full([1], 1, tl.int64)
    tmp9 = tmp0 < tmp8
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + x1, tmp10 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp0 >= tmp8
    tmp13 = tmp12 & tmp7
    tmp14 = tl.load(in_ptr1 + x1, tmp13 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.where(tmp9, tmp11, tmp14)
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp7, tmp15, tmp16)
    tmp18 = tmp0 >= tmp5
    tmp19 = tmp18 & tmp4
    tmp20 = tl.load(in_ptr2 + x1, tmp19 & xmask, eviction_policy='evict_last', other=0.0)
    tmp21 = tl.where(tmp6, tmp17, tmp20)
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp4, tmp21, tmp22)
    tmp24 = tmp0 >= tmp3
    tl.full([1], 4, tl.int64)
    tmp27 = tl.load(in_ptr3 + x1, tmp24 & xmask, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.where(tmp4, tmp23, tmp27)
    tl.store(out_ptr0 + (x0 + 8 * x1), tmp28, xmask)


@triton.jit
def triton_poi_fused_cat_18(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)


@triton.jit
def triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp41 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
    tmp42 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp45 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
    tmp49 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
    tmp53 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tmp4 = -1000000000.0
    tmp5 = tl.where(tmp2, tmp4, tmp3)
    tmp6 = 1.0
    tmp7 = tmp5 * tmp6
    tmp9 = tmp8 == tmp1
    tmp11 = tl.where(tmp9, tmp4, tmp10)
    tmp12 = tmp11 * tmp6
    tmp13 = triton_helpers.maximum(tmp7, tmp12)
    tmp15 = tmp14 == tmp1
    tmp17 = tl.where(tmp15, tmp4, tmp16)
    tmp18 = tmp17 * tmp6
    tmp19 = triton_helpers.maximum(tmp13, tmp18)
    tmp21 = tmp20 == tmp1
    tmp23 = tl.where(tmp21, tmp4, tmp22)
    tmp24 = tmp23 * tmp6
    tmp25 = triton_helpers.maximum(tmp19, tmp24)
    tmp26 = tmp7 - tmp25
    tmp27 = tmp26 * tmp6
    tmp28 = tl_math.exp(tmp27)
    tmp29 = tmp12 - tmp25
    tmp30 = tmp29 * tmp6
    tmp31 = tl_math.exp(tmp30)
    tmp32 = tmp28 + tmp31
    tmp33 = tmp18 - tmp25
    tmp34 = tmp33 * tmp6
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp32 + tmp35
    tmp37 = tmp24 - tmp25
    tmp38 = tmp37 * tmp6
    tmp39 = tl_math.exp(tmp38)
    tmp40 = tmp36 + tmp39
    tmp43 = tmp41 + tmp42
    tmp44 = tmp43 * tmp0
    tmp46 = tmp45 + tmp42
    tmp47 = tmp46 * tmp8
    tmp48 = tmp44 + tmp47
    tmp50 = tmp49 + tmp42
    tmp51 = tmp50 * tmp14
    tmp52 = tmp48 + tmp51
    tmp54 = tmp53 + tmp42
    tmp55 = tmp54 * tmp20
    tmp56 = tmp52 + tmp55
    tmp57 = tmp0 + tmp8
    tmp58 = tmp57 + tmp14
    tmp59 = tmp58 + tmp20
    tmp60 = tmp56 / tmp59
    tmp61 = tl.full([1], 0, tl.int32)
    tmp62 = triton_helpers.maximum(tmp61, tmp60)
    tl.store(out_ptr0 + x2, tmp25, xmask)
    tl.store(out_ptr1 + x2, tmp40, xmask)
    tl.store(in_out_ptr0 + x2, tmp62, xmask)


@triton.jit
def triton_poi_fused_div_mul_relu_sum_20(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp7 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp17 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp5 + tmp1
    tmp8 = tmp6 * tmp7
    tmp9 = tmp4 + tmp8
    tmp11 = tmp10 + tmp1
    tmp13 = tmp11 * tmp12
    tmp14 = tmp9 + tmp13
    tmp16 = tmp15 + tmp1
    tmp18 = tmp16 * tmp17
    tmp19 = tmp14 + tmp18
    tmp20 = tmp3 + tmp7
    tmp21 = tmp20 + tmp12
    tmp22 = tmp21 + tmp17
    tmp23 = tmp19 / tmp22
    tmp24 = tl.full([1], 0, tl.int32)
    tmp25 = triton_helpers.maximum(tmp24, tmp23)
    tl.store(in_out_ptr0 + x2, tmp25, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_21(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr1 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_mul_22(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex // 4
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (4 + x0 + 12 * x3), xmask)
    tmp6 = tl.load(in_ptr1 + (x0 + 12 * x3), xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 * tmp4
    tmp7 = tmp3 * tmp6
    tl.store(out_ptr0 + x4, tmp5, xmask)
    tl.store(out_ptr1 + x4, tmp7, xmask)


@triton.jit
def triton_poi_fused_bmm_23(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_24(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_25(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_bmm_26(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_cat_27(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp29 = tl.load(in_ptr4 + x2, xmask)
    tmp30 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 3, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 2, tl.int64)
    tmp6 = tmp0 < tmp5
    tmp7 = tmp6 & tmp4
    tmp8 = tl.full([1], 1, tl.int64)
    tmp9 = tmp0 < tmp8
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + x1, tmp10 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp0 >= tmp8
    tmp13 = tmp12 & tmp7
    tmp14 = tl.load(in_ptr1 + x1, tmp13 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.where(tmp9, tmp11, tmp14)
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp7, tmp15, tmp16)
    tmp18 = tmp0 >= tmp5
    tmp19 = tmp18 & tmp4
    tmp20 = tl.load(in_ptr2 + x1, tmp19 & xmask, eviction_policy='evict_last', other=0.0)
    tmp21 = tl.where(tmp6, tmp17, tmp20)
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp4, tmp21, tmp22)
    tmp24 = tmp0 >= tmp3
    tl.full([1], 4, tl.int64)
    tmp27 = tl.load(in_ptr3 + x1, tmp24 & xmask, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.where(tmp4, tmp23, tmp27)
    tmp31 = tmp29 + tmp30
    tmp32 = tmp31 + tmp28
    tl.store(in_out_ptr0 + x2, tmp32, xmask)


@triton.jit
def triton_poi_fused_add_cat_28(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp29 = tl.load(in_out_ptr0 + x2, xmask)
    tmp30 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 3, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 2, tl.int64)
    tmp6 = tmp0 < tmp5
    tmp7 = tmp6 & tmp4
    tmp8 = tl.full([1], 1, tl.int64)
    tmp9 = tmp0 < tmp8
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + x1, tmp10 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp0 >= tmp8
    tmp13 = tmp12 & tmp7
    tmp14 = tl.load(in_ptr1 + x1, tmp13 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.where(tmp9, tmp11, tmp14)
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp7, tmp15, tmp16)
    tmp18 = tmp0 >= tmp5
    tmp19 = tmp18 & tmp4
    tmp20 = tl.load(in_ptr2 + x1, tmp19 & xmask, eviction_policy='evict_last', other=0.0)
    tmp21 = tl.where(tmp6, tmp17, tmp20)
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp4, tmp21, tmp22)
    tmp24 = tmp0 >= tmp3
    tl.full([1], 4, tl.int64)
    tmp27 = tl.load(in_ptr3 + x1, tmp24 & xmask, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.where(tmp4, tmp23, tmp27)
    tmp31 = tmp29 + tmp30
    tmp32 = tmp31 + tmp28
    tl.store(in_out_ptr0 + x2, tmp32, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
     primals_13, primals_14, primals_15, primals_16, primals_17, primals_18,
     primals_19, primals_20, primals_21, primals_22, primals_23, primals_24,
     primals_25, primals_26, primals_27, primals_28) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (12, 4), (4, 1))
    assert_size_stride(primals_10, (12,), (1,))
    assert_size_stride(primals_11, (12, 4), (4, 1))
    assert_size_stride(primals_12, (12,), (1,))
    assert_size_stride(primals_13, (4, 8), (8, 1))
    assert_size_stride(primals_14, (4,), (1,))
    assert_size_stride(primals_15, (4, 8), (8, 1))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (4, 4), (4, 1))
    assert_size_stride(primals_18, (4,), (1,))
    assert_size_stride(primals_19, (4, 4), (4, 1))
    assert_size_stride(primals_20, (4,), (1,))
    assert_size_stride(primals_21, (12, 4), (4, 1))
    assert_size_stride(primals_22, (12,), (1,))
    assert_size_stride(primals_23, (12, 4), (4, 1))
    assert_size_stride(primals_24, (12,), (1,))
    assert_size_stride(primals_25, (4, 4), (4, 1))
    assert_size_stride(primals_26, (4,), (1,))
    assert_size_stride(primals_27, (4, 4), (4, 1))
    assert_size_stride(primals_28, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, reinterpret_tensor(primals_4, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf1)
        del primals_5
        del primals_6
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf673 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(64)](buf0, buf2,
            buf673, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf672 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf4,
            buf672, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_11, (4, 12), (1, 4), 0), out=buf5)
        buf6 = reinterpret_tensor(buf3, (4, 4, 12), (48, 12, 1), 0)
        del buf3
        triton_poi_fused_mul_1[grid(192)](buf6, primals_10, primals_7, 192,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf7 = reinterpret_tensor(buf5, (4, 4, 12), (48, 12, 1), 0)
        del buf5
        triton_poi_fused_mul_1[grid(192)](buf7, primals_12, primals_8, 192,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_bmm_2[grid(16)](buf6, buf8, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
        triton_poi_fused_bmm_3[grid(16)](buf7, buf9, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf8, buf9, out=buf10)
        buf11 = reinterpret_tensor(buf9, (4, 4, 1), (4, 1, 16), 0)
        del buf9
        triton_poi_fused_bmm_2[grid(16)](buf7, buf11, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf12 = reinterpret_tensor(buf8, (4, 1, 4), (4, 16, 1), 0)
        del buf8
        triton_poi_fused_bmm_3[grid(16)](buf6, buf12, 16,
XBLOCK=16, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf11, buf12, out=buf13) buf24 = reinterpret_tensor(buf12, (4, 4, 1), (4, 1, 16), 0) del buf12 triton_poi_fused_bmm_4[grid(16)](buf6, buf24, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = reinterpret_tensor(buf11, (4, 1, 4), (4, 16, 1), 0) del buf11 triton_poi_fused_bmm_5[grid(16)](buf7, buf25, 16, XBLOCK=16, num_warps=1, num_stages=1) buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf24, buf25, out=buf26) buf40 = reinterpret_tensor(buf25, (4, 4, 1), (4, 1, 16), 0) del buf25 triton_poi_fused_bmm_6[grid(16)](buf6, buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf41 = reinterpret_tensor(buf24, (4, 1, 4), (4, 16, 1), 0) del buf24 triton_poi_fused_bmm_7[grid(16)](buf7, buf41, 16, XBLOCK=16, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf40, buf41, out=buf42) buf56 = reinterpret_tensor(buf41, (4, 4, 1), (4, 1, 16), 0) del buf41 triton_poi_fused_bmm_8[grid(16)](buf6, buf56, 16, XBLOCK=16, num_warps=1, num_stages=1) buf57 = reinterpret_tensor(buf40, (4, 1, 4), (4, 16, 1), 0) del buf40 triton_poi_fused_bmm_9[grid(16)](buf7, buf57, 16, XBLOCK=16, num_warps=1, num_stages=1) buf58 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf56, buf57, out=buf58) buf14 = reinterpret_tensor(buf57, (4, 4, 1), (4, 1, 16), 0) del buf57 buf15 = buf56 del buf56 buf30 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf31 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf46 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf47 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf62 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf63 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_8, buf10, buf26, buf42, buf58, buf14, buf15, buf30, buf31, buf46, buf47, buf62, buf63, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = buf10 del buf10 buf32 = buf26 del buf26 buf48 = buf42 del buf42 buf64 = buf58 del buf58 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf16, buf32, buf48, buf64, primals_8, buf14, buf15, buf30, buf31, buf46, buf47, buf62, buf63, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = buf63 del buf63 triton_poi_fused_bmm_4[grid(16)](buf7, buf27, 16, XBLOCK=16, num_warps=1, num_stages=1) buf28 = reinterpret_tensor(buf62, (4, 1, 4), (4, 16, 1), 0) del buf62 triton_poi_fused_bmm_5[grid(16)](buf6, buf28, 16, XBLOCK=16, num_warps=1, num_stages=1) buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf27, buf28, out=buf29) buf43 = reinterpret_tensor(buf28, (4, 4, 1), (4, 1, 16), 0) del buf28 triton_poi_fused_bmm_6[grid(16)](buf7, buf43, 16, XBLOCK=16, num_warps=1, num_stages=1) buf44 = reinterpret_tensor(buf27, (4, 1, 4), (4, 16, 1), 0) del buf27 triton_poi_fused_bmm_7[grid(16)](buf6, buf44, 16, XBLOCK=16, num_warps=1, num_stages=1) buf45 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf43, buf44, out=buf45) buf17 = reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 16), 0) del buf44 buf18 = buf43 del buf43 buf33 = buf47 del buf47 buf34 = buf46 del buf46 buf49 = buf31 del buf31 buf50 = buf30 del buf30 triton_poi_fused__softmax_eq_masked_fill_12[grid(16)](primals_7, buf13, buf29, buf45, buf17, buf18, buf33, buf34, buf49, buf50, 16, XBLOCK=16, num_warps=1, num_stages=1) buf59 
= buf15 del buf15 triton_poi_fused_bmm_8[grid(16)](buf7, buf59, 16, XBLOCK=16, num_warps=1, num_stages=1) buf60 = reinterpret_tensor(buf14, (4, 1, 4), (4, 16, 1), 0) del buf14 triton_poi_fused_bmm_9[grid(16)](buf6, buf60, 16, XBLOCK=16, num_warps=1, num_stages=1) buf61 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf59, buf60, out=buf61) buf20 = reinterpret_tensor(buf60, (4, 4, 1), (4, 1, 16), 0) del buf60 triton_poi_fused_bmm_13[grid(16)](buf7, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) buf21 = reinterpret_tensor(buf59, (4, 4, 1), (4, 1, 1), 0) del buf59 extern_kernels.bmm(buf16, buf20, out=buf21) buf36 = buf20 del buf20 triton_poi_fused_bmm_14[grid(16)](buf7, buf36, 16, XBLOCK=16, num_warps=1, num_stages=1) buf37 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf32, buf36, out=buf37) buf52 = buf36 del buf36 triton_poi_fused_bmm_15[grid(16)](buf7, buf52, 16, XBLOCK=16, num_warps=1, num_stages=1) buf53 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf48, buf52, out=buf53) buf68 = buf52 del buf52 triton_poi_fused_bmm_16[grid(16)](buf7, buf68, 16, XBLOCK=16, num_warps=1, num_stages=1) buf69 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf64, buf68, out=buf69) buf75 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf70 = reinterpret_tensor(buf75, (4, 4, 4), (32, 8, 1), 4) triton_poi_fused_cat_17[grid(64)](buf21, buf37, buf53, buf69, buf70, 64, XBLOCK=64, num_warps=1, num_stages=1) buf74 = reinterpret_tensor(buf75, (4, 4, 4), (32, 8, 1), 0) triton_poi_fused_cat_18[grid(64)](buf0, buf74, 64, XBLOCK=64, num_warps=1, num_stages=1) buf78 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf75, (16, 8), (8, 1), 0), reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), out=buf78) buf65 = reinterpret_tensor(buf69, (4, 4, 1), (4, 1, 16), 0) del buf69 buf66 = reinterpret_tensor(buf53, (4, 4, 1), (4, 1, 16), 0) del buf53 buf80 = reinterpret_tensor(buf37, (4, 4), (4, 1), 0) del buf37 buf82 = buf80 del buf80 triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19[grid(16)]( buf82, primals_7, buf61, buf78, primals_14, buf65, buf66, 16, XBLOCK=16, num_warps=1, num_stages=1) buf19 = buf13 del buf13 buf35 = buf29 del buf29 buf51 = buf45 del buf45 buf67 = buf61 del buf61 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf19, buf35, buf51, buf67, primals_7, buf17, buf18, buf33, buf34, buf49, buf50, buf65, buf66, 64, XBLOCK=64, num_warps=1, num_stages=1) buf22 = buf66 del buf66 triton_poi_fused_bmm_13[grid(16)](buf6, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = reinterpret_tensor(buf65, (4, 4, 1), (4, 1, 1), 0) del buf65 extern_kernels.bmm(buf19, buf22, out=buf23) buf38 = buf22 del buf22 triton_poi_fused_bmm_14[grid(16)](buf6, buf38, 16, XBLOCK=16, num_warps=1, num_stages=1) buf39 = reinterpret_tensor(buf50, (4, 4, 1), (4, 1, 1), 0) del buf50 extern_kernels.bmm(buf35, buf38, out=buf39) buf54 = buf38 del buf38 triton_poi_fused_bmm_15[grid(16)](buf6, buf54, 16, XBLOCK=16, num_warps=1, num_stages=1) buf55 = reinterpret_tensor(buf49, (4, 4, 1), (4, 1, 1), 0) del buf49 extern_kernels.bmm(buf51, buf54, out=buf55) buf71 = buf54 del buf54 triton_poi_fused_bmm_16[grid(16)](buf6, buf71, 16, XBLOCK=16, num_warps=1, num_stages=1) buf72 = reinterpret_tensor(buf34, (4, 4, 1), (4, 1, 1), 0) del buf34 extern_kernels.bmm(buf67, buf71, out=buf72) buf77 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf73 = reinterpret_tensor(buf77, (4, 4, 4), (32, 8, 1), 
4) triton_poi_fused_cat_17[grid(64)](buf23, buf39, buf55, buf72, buf73, 64, XBLOCK=64, num_warps=1, num_stages=1) buf76 = reinterpret_tensor(buf77, (4, 4, 4), (32, 8, 1), 0) triton_poi_fused_cat_18[grid(64)](buf1, buf76, 64, XBLOCK=64, num_warps=1, num_stages=1) buf79 = buf1 del buf1 extern_kernels.mm(reinterpret_tensor(buf77, (16, 8), (8, 1), 0), reinterpret_tensor(primals_15, (8, 4), (1, 8), 0), out=buf79) buf81 = reinterpret_tensor(buf72, (4, 4), (4, 1), 0) del buf72 buf84 = buf81 del buf81 triton_poi_fused_div_mul_relu_sum_20[grid(16)](buf84, buf79, primals_16, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) buf83 = reinterpret_tensor(buf55, (4, 4), (4, 1), 0) del buf55 extern_kernels.addmm(primals_18, buf82, reinterpret_tensor( primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf83) buf85 = reinterpret_tensor(buf39, (4, 4), (4, 1), 0) del buf39 extern_kernels.addmm(primals_20, buf84, reinterpret_tensor( primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf85) buf86 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf671 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_21[grid(64)](buf78, primals_14, buf86, buf671, 64, XBLOCK=64, num_warps=1, num_stages=1 ) buf87 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf86, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 12), (1, 4), 0), out=buf87) buf88 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf670 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_21[grid(64)](buf79, primals_16, buf88, buf670, 64, XBLOCK=64, num_warps=1, num_stages=1 ) buf89 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf88, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf89) buf90 = reinterpret_tensor(buf87, (4, 4, 12), (48, 12, 1), 0) del buf87 triton_poi_fused_mul_1[grid(192)](buf90, primals_22, primals_7, 192, XBLOCK=256, num_warps=4, num_stages=1) buf91 = reinterpret_tensor(buf89, (4, 4, 12), (48, 12, 1), 0) del buf89 triton_poi_fused_mul_1[grid(192)](buf91, primals_24, primals_8, 192, XBLOCK=256, num_warps=4, num_stages=1) buf92 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf93 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_22[grid(64)](buf85, buf90, buf92, buf93, 64, XBLOCK=64, num_warps=1, num_stages=1) buf94 = reinterpret_tensor(buf23, (4, 4, 1), (4, 1, 16), 0) del buf23 triton_poi_fused_bmm_23[grid(16)](buf92, buf94, 16, XBLOCK=16, num_warps=1, num_stages=1) buf95 = reinterpret_tensor(buf71, (4, 1, 4), (4, 16, 1), 0) del buf71 triton_poi_fused_bmm_23[grid(16)](buf93, buf95, 16, XBLOCK=16, num_warps=1, num_stages=1) buf96 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf94, buf95, out=buf96) buf97 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf98 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_22[grid(64)](buf83, buf91, buf97, buf98, 64, XBLOCK=64, num_warps=1, num_stages=1) buf99 = reinterpret_tensor(buf95, (4, 4, 1), (4, 1, 16), 0) del buf95 triton_poi_fused_bmm_23[grid(16)](buf97, buf99, 16, XBLOCK=16, num_warps=1, num_stages=1) buf100 = reinterpret_tensor(buf94, (4, 1, 4), (4, 16, 1), 0) del buf94 triton_poi_fused_bmm_23[grid(16)](buf98, buf100, 16, XBLOCK=16, num_warps=1, num_stages=1) buf101 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
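        # Editor's note: buf92/buf93 and buf97/buf98 hold the gated k/q pairs
        # for the two streams (see triton_poi_fused_add_mul_22 above); each
        # (4, 4, 1) x (4, 1, 4) bmm that follows forms one head's 4x4 score
        # matrix as a rank-1 outer product before the masked softmax.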
extern_kernels.bmm(buf99, buf100, out=buf101) buf112 = buf99 del buf99 triton_poi_fused_bmm_24[grid(16)](buf92, buf112, 16, XBLOCK=16, num_warps=1, num_stages=1) buf113 = buf100 del buf100 triton_poi_fused_bmm_24[grid(16)](buf93, buf113, 16, XBLOCK=16, num_warps=1, num_stages=1) buf114 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf112, buf113, out=buf114) buf128 = reinterpret_tensor(buf113, (4, 4, 1), (4, 1, 16), 0) del buf113 triton_poi_fused_bmm_25[grid(16)](buf92, buf128, 16, XBLOCK=16, num_warps=1, num_stages=1) buf129 = reinterpret_tensor(buf112, (4, 1, 4), (4, 16, 1), 0) del buf112 triton_poi_fused_bmm_25[grid(16)](buf93, buf129, 16, XBLOCK=16, num_warps=1, num_stages=1) buf130 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf128, buf129, out=buf130) buf144 = reinterpret_tensor(buf129, (4, 4, 1), (4, 1, 16), 0) del buf129 triton_poi_fused_bmm_26[grid(16)](buf92, buf144, 16, XBLOCK=16, num_warps=1, num_stages=1) buf145 = reinterpret_tensor(buf128, (4, 1, 4), (4, 16, 1), 0) del buf128 triton_poi_fused_bmm_26[grid(16)](buf93, buf145, 16, XBLOCK=16, num_warps=1, num_stages=1) buf146 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf144, buf145, out=buf146) buf102 = reinterpret_tensor(buf145, (4, 4, 1), (4, 1, 16), 0) del buf145 buf103 = buf144 del buf144 buf118 = buf33 del buf33 buf119 = buf18 del buf18 buf134 = buf17 del buf17 buf135 = reinterpret_tensor(buf21, (4, 4, 1), (4, 1, 16), 0) del buf21 buf150 = buf68 del buf68 buf151 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_7, buf96, buf114, buf130, buf146, buf102, buf103, buf118, buf119, buf134, buf135, buf150, buf151, 16, XBLOCK=16, num_warps=1, num_stages=1) buf104 = buf96 del buf96 buf120 = buf114 del buf114 buf136 = buf130 del buf130 buf152 = buf146 del buf146 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf104, buf120, buf136, buf152, primals_7, buf102, buf103, buf118, buf119, buf134, buf135, buf150, buf151, 64, XBLOCK=64, num_warps=1, num_stages=1) buf115 = buf151 del buf151 triton_poi_fused_bmm_24[grid(16)](buf97, buf115, 16, XBLOCK=16, num_warps=1, num_stages=1) buf116 = reinterpret_tensor(buf150, (4, 1, 4), (4, 16, 1), 0) del buf150 triton_poi_fused_bmm_24[grid(16)](buf98, buf116, 16, XBLOCK=16, num_warps=1, num_stages=1) buf117 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf115, buf116, out=buf117) buf131 = reinterpret_tensor(buf116, (4, 4, 1), (4, 1, 16), 0) del buf116 triton_poi_fused_bmm_25[grid(16)](buf97, buf131, 16, XBLOCK=16, num_warps=1, num_stages=1) buf132 = reinterpret_tensor(buf115, (4, 1, 4), (4, 16, 1), 0) del buf115 triton_poi_fused_bmm_25[grid(16)](buf98, buf132, 16, XBLOCK=16, num_warps=1, num_stages=1) buf133 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf131, buf132, out=buf133) buf147 = reinterpret_tensor(buf132, (4, 4, 1), (4, 1, 16), 0) del buf132 triton_poi_fused_bmm_26[grid(16)](buf97, buf147, 16, XBLOCK=16, num_warps=1, num_stages=1) buf148 = reinterpret_tensor(buf131, (4, 1, 4), (4, 16, 1), 0) del buf131 triton_poi_fused_bmm_26[grid(16)](buf98, buf148, 16, XBLOCK=16, num_warps=1, num_stages=1) buf149 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf147, buf148, out=buf149) buf105 = reinterpret_tensor(buf148, (4, 4, 1), (4, 1, 16), 0) del buf148 buf106 = buf147 del buf147 buf121 = buf135 del buf135 buf122 = buf134 del buf134 
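        # Editor's note: the buffer renaming here is Inductor's memory reuse;
        # dead (4, 4, 1) scratch tensors are handed to the fused softmax
        # kernels, which (per their names) appear to hold the per-row max and
        # exp-sum pairs consumed by the in-place normalization in
        # triton_poi_fused__softmax_eq_masked_fill_11.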
buf137 = buf119 del buf119 buf138 = buf118 del buf118 buf153 = buf103 del buf103 buf154 = buf102 del buf102 triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_8, buf101, buf117, buf133, buf149, buf105, buf106, buf121, buf122, buf137, buf138, buf153, buf154, 16, XBLOCK=16, num_warps=1, num_stages=1) buf107 = buf101 del buf101 buf123 = buf117 del buf117 buf139 = buf133 del buf133 buf155 = buf149 del buf149 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf107, buf123, buf139, buf155, primals_8, buf105, buf106, buf121, buf122, buf137, buf138, buf153, buf154, 64, XBLOCK=64, num_warps=1, num_stages=1) buf108 = buf154 del buf154 triton_poi_fused_bmm_13[grid(16)](buf90, buf108, 16, XBLOCK=16, num_warps=1, num_stages=1) buf109 = reinterpret_tensor(buf153, (4, 4, 1), (4, 1, 1), 0) del buf153 extern_kernels.bmm(buf104, buf108, out=buf109) buf110 = buf108 del buf108 triton_poi_fused_bmm_13[grid(16)](buf91, buf110, 16, XBLOCK=16, num_warps=1, num_stages=1) buf111 = reinterpret_tensor(buf138, (4, 4, 1), (4, 1, 1), 0) del buf138 extern_kernels.bmm(buf107, buf110, out=buf111) buf124 = buf110 del buf110 triton_poi_fused_bmm_14[grid(16)](buf90, buf124, 16, XBLOCK=16, num_warps=1, num_stages=1) buf125 = reinterpret_tensor(buf137, (4, 4, 1), (4, 1, 1), 0) del buf137 extern_kernels.bmm(buf120, buf124, out=buf125) buf126 = buf124 del buf124 triton_poi_fused_bmm_14[grid(16)](buf91, buf126, 16, XBLOCK=16, num_warps=1, num_stages=1) buf127 = reinterpret_tensor(buf122, (4, 4, 1), (4, 1, 1), 0) del buf122 extern_kernels.bmm(buf123, buf126, out=buf127) buf140 = buf126 del buf126 triton_poi_fused_bmm_15[grid(16)](buf90, buf140, 16, XBLOCK=16, num_warps=1, num_stages=1) buf141 = reinterpret_tensor(buf121, (4, 4, 1), (4, 1, 1), 0) del buf121 extern_kernels.bmm(buf136, buf140, out=buf141) buf142 = buf140 del buf140 triton_poi_fused_bmm_15[grid(16)](buf91, buf142, 16, XBLOCK=16, num_warps=1, num_stages=1) buf143 = reinterpret_tensor(buf106, (4, 4, 1), (4, 1, 1), 0) del buf106 extern_kernels.bmm(buf139, buf142, out=buf143) buf156 = buf142 del buf142 triton_poi_fused_bmm_16[grid(16)](buf90, buf156, 16, XBLOCK=16, num_warps=1, num_stages=1) buf157 = reinterpret_tensor(buf105, (4, 4, 1), (4, 1, 1), 0) del buf105 extern_kernels.bmm(buf152, buf156, out=buf157) buf158 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf162 = buf158 del buf158 triton_poi_fused_add_cat_27[grid(64)](buf162, buf109, buf125, buf141, buf157, buf78, primals_14, 64, XBLOCK=64, num_warps=1, num_stages=1) buf159 = reinterpret_tensor(buf157, (4, 4, 1), (4, 1, 16), 0) del buf157 triton_poi_fused_bmm_16[grid(16)](buf91, buf159, 16, XBLOCK=16, num_warps=1, num_stages=1) buf160 = buf141 del buf141 extern_kernels.bmm(buf155, buf159, out=buf160) buf161 = reinterpret_tensor(buf78, (4, 4, 4), (16, 4, 1), 0) del buf78 buf164 = buf161 del buf161 triton_poi_fused_add_cat_27[grid(64)](buf164, buf111, buf127, buf143, buf160, buf79, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf163 = buf79 del buf79 extern_kernels.addmm(primals_26, reinterpret_tensor(buf162, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf163) buf165 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_28, reinterpret_tensor(buf164, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf165) buf166 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf669 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) 
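        # Editor's note: buf163/buf165 (addmm with primals_25..28) close one
        # unrolled block for the two streams. The boolean buf669/buf668
        # tensors about to be filled record where ReLU clamped to zero and
        # are kept for the threshold_backward pass; the same block structure
        # now repeats with fresh buffer names.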
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf163, buf166, buf669, 64, XBLOCK=64, num_warps=1, num_stages=1) buf167 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf166, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf167) buf168 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf668 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf165, buf168, buf668, 64, XBLOCK=64, num_warps=1, num_stages=1) buf169 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf168, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 12), (1, 4), 0), out=buf169) buf170 = reinterpret_tensor(buf167, (4, 4, 12), (48, 12, 1), 0) del buf167 triton_poi_fused_mul_1[grid(192)](buf170, primals_10, primals_7, 192, XBLOCK=256, num_warps=4, num_stages=1) buf171 = reinterpret_tensor(buf169, (4, 4, 12), (48, 12, 1), 0) del buf169 triton_poi_fused_mul_1[grid(192)](buf171, primals_12, primals_8, 192, XBLOCK=256, num_warps=4, num_stages=1) buf172 = reinterpret_tensor(buf160, (4, 4, 1), (4, 1, 16), 0) del buf160 triton_poi_fused_bmm_2[grid(16)](buf170, buf172, 16, XBLOCK=16, num_warps=1, num_stages=1) buf173 = reinterpret_tensor(buf143, (4, 1, 4), (4, 16, 1), 0) del buf143 triton_poi_fused_bmm_3[grid(16)](buf171, buf173, 16, XBLOCK=16, num_warps=1, num_stages=1) buf174 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf172, buf173, out=buf174) buf175 = reinterpret_tensor(buf173, (4, 4, 1), (4, 1, 16), 0) del buf173 triton_poi_fused_bmm_2[grid(16)](buf171, buf175, 16, XBLOCK=16, num_warps=1, num_stages=1) buf176 = reinterpret_tensor(buf172, (4, 1, 4), (4, 16, 1), 0) del buf172 triton_poi_fused_bmm_3[grid(16)](buf170, buf176, 16, XBLOCK=16, num_warps=1, num_stages=1) buf177 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf175, buf176, out=buf177) buf188 = reinterpret_tensor(buf176, (4, 4, 1), (4, 1, 16), 0) del buf176 triton_poi_fused_bmm_4[grid(16)](buf170, buf188, 16, XBLOCK=16, num_warps=1, num_stages=1) buf189 = reinterpret_tensor(buf175, (4, 1, 4), (4, 16, 1), 0) del buf175 triton_poi_fused_bmm_5[grid(16)](buf171, buf189, 16, XBLOCK=16, num_warps=1, num_stages=1) buf190 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf188, buf189, out=buf190) buf204 = reinterpret_tensor(buf189, (4, 4, 1), (4, 1, 16), 0) del buf189 triton_poi_fused_bmm_6[grid(16)](buf170, buf204, 16, XBLOCK=16, num_warps=1, num_stages=1) buf205 = reinterpret_tensor(buf188, (4, 1, 4), (4, 16, 1), 0) del buf188 triton_poi_fused_bmm_7[grid(16)](buf171, buf205, 16, XBLOCK=16, num_warps=1, num_stages=1) buf206 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf204, buf205, out=buf206) buf220 = reinterpret_tensor(buf205, (4, 4, 1), (4, 1, 16), 0) del buf205 triton_poi_fused_bmm_8[grid(16)](buf170, buf220, 16, XBLOCK=16, num_warps=1, num_stages=1) buf221 = reinterpret_tensor(buf204, (4, 1, 4), (4, 16, 1), 0) del buf204 triton_poi_fused_bmm_9[grid(16)](buf171, buf221, 16, XBLOCK=16, num_warps=1, num_stages=1) buf222 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf220, buf221, out=buf222) buf178 = reinterpret_tensor(buf221, (4, 4, 1), (4, 1, 16), 0) del buf221 buf179 = buf220 del buf220 buf194 = reinterpret_tensor(buf127, (4, 4, 1), (4, 1, 16), 0) del buf127 buf195 = reinterpret_tensor(buf111, (4, 4, 1), (4, 1, 16), 
0) del buf111 buf210 = buf159 del buf159 buf211 = reinterpret_tensor(buf125, (4, 4, 1), (4, 1, 16), 0) del buf125 buf226 = reinterpret_tensor(buf109, (4, 4, 1), (4, 1, 16), 0) del buf109 buf227 = buf156 del buf156 triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_8, buf174, buf190, buf206, buf222, buf178, buf179, buf194, buf195, buf210, buf211, buf226, buf227, 16, XBLOCK=16, num_warps=1, num_stages=1) buf180 = buf174 del buf174 buf196 = buf190 del buf190 buf212 = buf206 del buf206 buf228 = buf222 del buf222 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf180, buf196, buf212, buf228, primals_8, buf178, buf179, buf194, buf195, buf210, buf211, buf226, buf227, 64, XBLOCK=64, num_warps=1, num_stages=1) buf191 = buf227 del buf227 triton_poi_fused_bmm_4[grid(16)](buf171, buf191, 16, XBLOCK=16, num_warps=1, num_stages=1) buf192 = reinterpret_tensor(buf226, (4, 1, 4), (4, 16, 1), 0) del buf226 triton_poi_fused_bmm_5[grid(16)](buf170, buf192, 16, XBLOCK=16, num_warps=1, num_stages=1) buf193 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf191, buf192, out=buf193) buf207 = reinterpret_tensor(buf192, (4, 4, 1), (4, 1, 16), 0) del buf192 triton_poi_fused_bmm_6[grid(16)](buf171, buf207, 16, XBLOCK=16, num_warps=1, num_stages=1) buf208 = reinterpret_tensor(buf191, (4, 1, 4), (4, 16, 1), 0) del buf191 triton_poi_fused_bmm_7[grid(16)](buf170, buf208, 16, XBLOCK=16, num_warps=1, num_stages=1) buf209 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf207, buf208, out=buf209) buf181 = reinterpret_tensor(buf208, (4, 4, 1), (4, 1, 16), 0) del buf208 buf182 = buf207 del buf207 buf197 = buf211 del buf211 buf198 = buf210 del buf210 buf213 = buf195 del buf195 buf214 = buf194 del buf194 triton_poi_fused__softmax_eq_masked_fill_12[grid(16)](primals_7, buf177, buf193, buf209, buf181, buf182, buf197, buf198, buf213, buf214, 16, XBLOCK=16, num_warps=1, num_stages=1) buf223 = buf179 del buf179 triton_poi_fused_bmm_8[grid(16)](buf171, buf223, 16, XBLOCK=16, num_warps=1, num_stages=1) buf224 = reinterpret_tensor(buf178, (4, 1, 4), (4, 16, 1), 0) del buf178 triton_poi_fused_bmm_9[grid(16)](buf170, buf224, 16, XBLOCK=16, num_warps=1, num_stages=1) buf225 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf223, buf224, out=buf225) buf184 = reinterpret_tensor(buf224, (4, 4, 1), (4, 1, 16), 0) del buf224 triton_poi_fused_bmm_13[grid(16)](buf171, buf184, 16, XBLOCK=16, num_warps=1, num_stages=1) buf185 = reinterpret_tensor(buf223, (4, 4, 1), (4, 1, 1), 0) del buf223 extern_kernels.bmm(buf180, buf184, out=buf185) buf200 = buf184 del buf184 triton_poi_fused_bmm_14[grid(16)](buf171, buf200, 16, XBLOCK=16, num_warps=1, num_stages=1) buf201 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf196, buf200, out=buf201) buf216 = buf200 del buf200 triton_poi_fused_bmm_15[grid(16)](buf171, buf216, 16, XBLOCK=16, num_warps=1, num_stages=1) buf217 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf212, buf216, out=buf217) buf232 = buf216 del buf216 triton_poi_fused_bmm_16[grid(16)](buf171, buf232, 16, XBLOCK=16, num_warps=1, num_stages=1) buf233 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf228, buf232, out=buf233) buf239 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf234 = reinterpret_tensor(buf239, (4, 4, 4), (32, 8, 1), 4) triton_poi_fused_cat_17[grid(64)](buf185, buf201, buf217, buf233, buf234, 64, XBLOCK=64, num_warps=1, 
num_stages=1) buf238 = reinterpret_tensor(buf239, (4, 4, 4), (32, 8, 1), 0) triton_poi_fused_cat_18[grid(64)](buf163, buf238, 64, XBLOCK=64, num_warps=1, num_stages=1) buf242 = buf163 del buf163 extern_kernels.mm(reinterpret_tensor(buf239, (16, 8), (8, 1), 0), reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), out=buf242) buf229 = reinterpret_tensor(buf233, (4, 4, 1), (4, 1, 16), 0) del buf233 buf230 = reinterpret_tensor(buf217, (4, 4, 1), (4, 1, 16), 0) del buf217 buf244 = reinterpret_tensor(buf201, (4, 4), (4, 1), 0) del buf201 buf246 = buf244 del buf244 triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19[grid(16)]( buf246, primals_7, buf225, buf242, primals_14, buf229, buf230, 16, XBLOCK=16, num_warps=1, num_stages=1) buf183 = buf177 del buf177 buf199 = buf193 del buf193 buf215 = buf209 del buf209 buf231 = buf225 del buf225 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf183, buf199, buf215, buf231, primals_7, buf181, buf182, buf197, buf198, buf213, buf214, buf229, buf230, 64, XBLOCK=64, num_warps=1, num_stages=1) buf186 = buf230 del buf230 triton_poi_fused_bmm_13[grid(16)](buf170, buf186, 16, XBLOCK=16, num_warps=1, num_stages=1) buf187 = reinterpret_tensor(buf229, (4, 4, 1), (4, 1, 1), 0) del buf229 extern_kernels.bmm(buf183, buf186, out=buf187) buf202 = buf186 del buf186 triton_poi_fused_bmm_14[grid(16)](buf170, buf202, 16, XBLOCK=16, num_warps=1, num_stages=1) buf203 = reinterpret_tensor(buf214, (4, 4, 1), (4, 1, 1), 0) del buf214 extern_kernels.bmm(buf199, buf202, out=buf203) buf218 = buf202 del buf202 triton_poi_fused_bmm_15[grid(16)](buf170, buf218, 16, XBLOCK=16, num_warps=1, num_stages=1) buf219 = reinterpret_tensor(buf213, (4, 4, 1), (4, 1, 1), 0) del buf213 extern_kernels.bmm(buf215, buf218, out=buf219) buf235 = buf218 del buf218 triton_poi_fused_bmm_16[grid(16)](buf170, buf235, 16, XBLOCK=16, num_warps=1, num_stages=1) buf236 = reinterpret_tensor(buf198, (4, 4, 1), (4, 1, 1), 0) del buf198 extern_kernels.bmm(buf231, buf235, out=buf236) buf241 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf237 = reinterpret_tensor(buf241, (4, 4, 4), (32, 8, 1), 4) triton_poi_fused_cat_17[grid(64)](buf187, buf203, buf219, buf236, buf237, 64, XBLOCK=64, num_warps=1, num_stages=1) buf240 = reinterpret_tensor(buf241, (4, 4, 4), (32, 8, 1), 0) triton_poi_fused_cat_18[grid(64)](buf165, buf240, 64, XBLOCK=64, num_warps=1, num_stages=1) buf243 = buf165 del buf165 extern_kernels.mm(reinterpret_tensor(buf241, (16, 8), (8, 1), 0), reinterpret_tensor(primals_15, (8, 4), (1, 8), 0), out=buf243) buf245 = reinterpret_tensor(buf236, (4, 4), (4, 1), 0) del buf236 buf248 = buf245 del buf245 triton_poi_fused_div_mul_relu_sum_20[grid(16)](buf248, buf243, primals_16, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) buf247 = reinterpret_tensor(buf219, (4, 4), (4, 1), 0) del buf219 extern_kernels.addmm(primals_18, buf246, reinterpret_tensor( primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf247) buf249 = reinterpret_tensor(buf203, (4, 4), (4, 1), 0) del buf203 extern_kernels.addmm(primals_20, buf248, reinterpret_tensor( primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf249) buf250 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf667 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_21[grid(64)](buf242, primals_14, buf250, buf667, 64, XBLOCK=64, num_warps=1, num_stages=1) buf251 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf250, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_21, (4, 12), (1, 4), 0), out=buf251) buf252 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf666 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_21[grid(64)](buf243, primals_16, buf252, buf666, 64, XBLOCK=64, num_warps=1, num_stages=1) buf253 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf252, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf253) buf254 = reinterpret_tensor(buf251, (4, 4, 12), (48, 12, 1), 0) del buf251 triton_poi_fused_mul_1[grid(192)](buf254, primals_22, primals_7, 192, XBLOCK=256, num_warps=4, num_stages=1) buf255 = reinterpret_tensor(buf253, (4, 4, 12), (48, 12, 1), 0) del buf253 triton_poi_fused_mul_1[grid(192)](buf255, primals_24, primals_8, 192, XBLOCK=256, num_warps=4, num_stages=1) buf256 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf257 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_22[grid(64)](buf249, buf254, buf256, buf257, 64, XBLOCK=64, num_warps=1, num_stages=1) buf258 = reinterpret_tensor(buf187, (4, 4, 1), (4, 1, 16), 0) del buf187 triton_poi_fused_bmm_23[grid(16)](buf256, buf258, 16, XBLOCK=16, num_warps=1, num_stages=1) buf259 = reinterpret_tensor(buf235, (4, 1, 4), (4, 16, 1), 0) del buf235 triton_poi_fused_bmm_23[grid(16)](buf257, buf259, 16, XBLOCK=16, num_warps=1, num_stages=1) buf260 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf258, buf259, out=buf260) buf261 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf262 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_22[grid(64)](buf247, buf255, buf261, buf262, 64, XBLOCK=64, num_warps=1, num_stages=1) buf263 = reinterpret_tensor(buf259, (4, 4, 1), (4, 1, 16), 0) del buf259 triton_poi_fused_bmm_23[grid(16)](buf261, buf263, 16, XBLOCK=16, num_warps=1, num_stages=1) buf264 = reinterpret_tensor(buf258, (4, 1, 4), (4, 16, 1), 0) del buf258 triton_poi_fused_bmm_23[grid(16)](buf262, buf264, 16, XBLOCK=16, num_warps=1, num_stages=1) buf265 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf263, buf264, out=buf265) buf276 = reinterpret_tensor(buf264, (4, 4, 1), (4, 1, 16), 0) del buf264 triton_poi_fused_bmm_24[grid(16)](buf256, buf276, 16, XBLOCK=16, num_warps=1, num_stages=1) buf277 = reinterpret_tensor(buf263, (4, 1, 4), (4, 16, 1), 0) del buf263 triton_poi_fused_bmm_24[grid(16)](buf257, buf277, 16, XBLOCK=16, num_warps=1, num_stages=1) buf278 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf276, buf277, out=buf278) buf292 = reinterpret_tensor(buf277, (4, 4, 1), (4, 1, 16), 0) del buf277 triton_poi_fused_bmm_25[grid(16)](buf256, buf292, 16, XBLOCK=16, num_warps=1, num_stages=1) buf293 = reinterpret_tensor(buf276, (4, 1, 4), (4, 16, 1), 0) del buf276 triton_poi_fused_bmm_25[grid(16)](buf257, buf293, 16, XBLOCK=16, num_warps=1, num_stages=1) buf294 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf292, buf293, out=buf294) buf308 = reinterpret_tensor(buf293, (4, 4, 1), (4, 1, 16), 0) del buf293 triton_poi_fused_bmm_26[grid(16)](buf256, buf308, 16, XBLOCK=16, num_warps=1, num_stages=1) buf309 = reinterpret_tensor(buf292, (4, 1, 4), (4, 16, 1), 0) del buf292 triton_poi_fused_bmm_26[grid(16)](buf257, buf309, 16, XBLOCK=16, num_warps=1, num_stages=1) buf310 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
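        # Editor's note: same gated-attention pattern as the first block --
        # gather one feature column per head (bmm_23..26), outer-product the
        # column pairs into 4x4 scores, then masked softmax. Only the buffer
        # names change between unrolled iterations.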
extern_kernels.bmm(buf308, buf309, out=buf310) buf266 = reinterpret_tensor(buf309, (4, 4, 1), (4, 1, 16), 0) del buf309 buf267 = buf308 del buf308 buf282 = buf197 del buf197 buf283 = buf182 del buf182 buf298 = buf181 del buf181 buf299 = reinterpret_tensor(buf185, (4, 4, 1), (4, 1, 16), 0) del buf185 buf314 = buf232 del buf232 buf315 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_7, buf260, buf278, buf294, buf310, buf266, buf267, buf282, buf283, buf298, buf299, buf314, buf315, 16, XBLOCK=16, num_warps=1, num_stages=1) buf268 = buf260 del buf260 buf284 = buf278 del buf278 buf300 = buf294 del buf294 buf316 = buf310 del buf310 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf268, buf284, buf300, buf316, primals_7, buf266, buf267, buf282, buf283, buf298, buf299, buf314, buf315, 64, XBLOCK=64, num_warps=1, num_stages=1) buf279 = buf315 del buf315 triton_poi_fused_bmm_24[grid(16)](buf261, buf279, 16, XBLOCK=16, num_warps=1, num_stages=1) buf280 = reinterpret_tensor(buf314, (4, 1, 4), (4, 16, 1), 0) del buf314 triton_poi_fused_bmm_24[grid(16)](buf262, buf280, 16, XBLOCK=16, num_warps=1, num_stages=1) buf281 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf279, buf280, out=buf281) buf295 = reinterpret_tensor(buf280, (4, 4, 1), (4, 1, 16), 0) del buf280 triton_poi_fused_bmm_25[grid(16)](buf261, buf295, 16, XBLOCK=16, num_warps=1, num_stages=1) buf296 = reinterpret_tensor(buf279, (4, 1, 4), (4, 16, 1), 0) del buf279 triton_poi_fused_bmm_25[grid(16)](buf262, buf296, 16, XBLOCK=16, num_warps=1, num_stages=1) buf297 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf295, buf296, out=buf297) buf311 = reinterpret_tensor(buf296, (4, 4, 1), (4, 1, 16), 0) del buf296 triton_poi_fused_bmm_26[grid(16)](buf261, buf311, 16, XBLOCK=16, num_warps=1, num_stages=1) buf312 = reinterpret_tensor(buf295, (4, 1, 4), (4, 16, 1), 0) del buf295 triton_poi_fused_bmm_26[grid(16)](buf262, buf312, 16, XBLOCK=16, num_warps=1, num_stages=1) buf313 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf311, buf312, out=buf313) buf269 = reinterpret_tensor(buf312, (4, 4, 1), (4, 1, 16), 0) del buf312 buf270 = buf311 del buf311 buf285 = buf299 del buf299 buf286 = buf298 del buf298 buf301 = buf283 del buf283 buf302 = buf282 del buf282 buf317 = buf267 del buf267 buf318 = buf266 del buf266 triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_8, buf265, buf281, buf297, buf313, buf269, buf270, buf285, buf286, buf301, buf302, buf317, buf318, 16, XBLOCK=16, num_warps=1, num_stages=1) buf271 = buf265 del buf265 buf287 = buf281 del buf281 buf303 = buf297 del buf297 buf319 = buf313 del buf313 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf271, buf287, buf303, buf319, primals_8, buf269, buf270, buf285, buf286, buf301, buf302, buf317, buf318, 64, XBLOCK=64, num_warps=1, num_stages=1) buf272 = buf318 del buf318 triton_poi_fused_bmm_13[grid(16)](buf254, buf272, 16, XBLOCK=16, num_warps=1, num_stages=1) buf273 = reinterpret_tensor(buf317, (4, 4, 1), (4, 1, 1), 0) del buf317 extern_kernels.bmm(buf268, buf272, out=buf273) buf274 = buf272 del buf272 triton_poi_fused_bmm_13[grid(16)](buf255, buf274, 16, XBLOCK=16, num_warps=1, num_stages=1) buf275 = reinterpret_tensor(buf302, (4, 4, 1), (4, 1, 1), 0) del buf302 extern_kernels.bmm(buf271, buf274, out=buf275) buf288 = buf274 del buf274 triton_poi_fused_bmm_14[grid(16)](buf254, buf288, 16, XBLOCK=16, num_warps=1, 
num_stages=1) buf289 = reinterpret_tensor(buf301, (4, 4, 1), (4, 1, 1), 0) del buf301 extern_kernels.bmm(buf284, buf288, out=buf289) buf290 = buf288 del buf288 triton_poi_fused_bmm_14[grid(16)](buf255, buf290, 16, XBLOCK=16, num_warps=1, num_stages=1) buf291 = reinterpret_tensor(buf286, (4, 4, 1), (4, 1, 1), 0) del buf286 extern_kernels.bmm(buf287, buf290, out=buf291) buf304 = buf290 del buf290 triton_poi_fused_bmm_15[grid(16)](buf254, buf304, 16, XBLOCK=16, num_warps=1, num_stages=1) buf305 = reinterpret_tensor(buf285, (4, 4, 1), (4, 1, 1), 0) del buf285 extern_kernels.bmm(buf300, buf304, out=buf305) buf306 = buf304 del buf304 triton_poi_fused_bmm_15[grid(16)](buf255, buf306, 16, XBLOCK=16, num_warps=1, num_stages=1) buf307 = reinterpret_tensor(buf270, (4, 4, 1), (4, 1, 1), 0) del buf270 extern_kernels.bmm(buf303, buf306, out=buf307) buf320 = buf306 del buf306 triton_poi_fused_bmm_16[grid(16)](buf254, buf320, 16, XBLOCK=16, num_warps=1, num_stages=1) buf321 = reinterpret_tensor(buf269, (4, 4, 1), (4, 1, 1), 0) del buf269 extern_kernels.bmm(buf316, buf320, out=buf321) buf326 = reinterpret_tensor(buf242, (4, 4, 4), (16, 4, 1), 0) del buf242 triton_poi_fused_add_cat_28[grid(64)](buf326, buf273, buf289, buf305, buf321, primals_14, 64, XBLOCK=64, num_warps=1, num_stages=1) buf323 = reinterpret_tensor(buf321, (4, 4, 1), (4, 1, 16), 0) del buf321 triton_poi_fused_bmm_16[grid(16)](buf255, buf323, 16, XBLOCK=16, num_warps=1, num_stages=1) buf324 = buf305 del buf305 extern_kernels.bmm(buf319, buf323, out=buf324) buf328 = reinterpret_tensor(buf243, (4, 4, 4), (16, 4, 1), 0) del buf243 triton_poi_fused_add_cat_28[grid(64)](buf328, buf275, buf291, buf307, buf324, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf327 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_26, reinterpret_tensor(buf326, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf327) buf329 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_28, reinterpret_tensor(buf328, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf329) buf330 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf665 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf327, buf330, buf665, 64, XBLOCK=64, num_warps=1, num_stages=1) buf331 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf330, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf331) buf332 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf664 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf329, buf332, buf664, 64, XBLOCK=64, num_warps=1, num_stages=1) buf333 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf332, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 12), (1, 4), 0), out=buf333) buf334 = reinterpret_tensor(buf331, (4, 4, 12), (48, 12, 1), 0) del buf331 triton_poi_fused_mul_1[grid(192)](buf334, primals_10, primals_7, 192, XBLOCK=256, num_warps=4, num_stages=1) buf335 = reinterpret_tensor(buf333, (4, 4, 12), (48, 12, 1), 0) del buf333 triton_poi_fused_mul_1[grid(192)](buf335, primals_12, primals_8, 192, XBLOCK=256, num_warps=4, num_stages=1) buf336 = reinterpret_tensor(buf324, (4, 4, 1), (4, 1, 16), 0) del buf324 triton_poi_fused_bmm_2[grid(16)](buf334, buf336, 16, 
XBLOCK=16, num_warps=1, num_stages=1) buf337 = reinterpret_tensor(buf307, (4, 1, 4), (4, 16, 1), 0) del buf307 triton_poi_fused_bmm_3[grid(16)](buf335, buf337, 16, XBLOCK=16, num_warps=1, num_stages=1) buf338 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf336, buf337, out=buf338) buf339 = reinterpret_tensor(buf337, (4, 4, 1), (4, 1, 16), 0) del buf337 triton_poi_fused_bmm_2[grid(16)](buf335, buf339, 16, XBLOCK=16, num_warps=1, num_stages=1) buf340 = reinterpret_tensor(buf336, (4, 1, 4), (4, 16, 1), 0) del buf336 triton_poi_fused_bmm_3[grid(16)](buf334, buf340, 16, XBLOCK=16, num_warps=1, num_stages=1) buf341 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf339, buf340, out=buf341) buf352 = reinterpret_tensor(buf340, (4, 4, 1), (4, 1, 16), 0) del buf340 triton_poi_fused_bmm_4[grid(16)](buf334, buf352, 16, XBLOCK=16, num_warps=1, num_stages=1) buf353 = reinterpret_tensor(buf339, (4, 1, 4), (4, 16, 1), 0) del buf339 triton_poi_fused_bmm_5[grid(16)](buf335, buf353, 16, XBLOCK=16, num_warps=1, num_stages=1) buf354 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf352, buf353, out=buf354) buf368 = reinterpret_tensor(buf353, (4, 4, 1), (4, 1, 16), 0) del buf353 triton_poi_fused_bmm_6[grid(16)](buf334, buf368, 16, XBLOCK=16, num_warps=1, num_stages=1) buf369 = reinterpret_tensor(buf352, (4, 1, 4), (4, 16, 1), 0) del buf352 triton_poi_fused_bmm_7[grid(16)](buf335, buf369, 16, XBLOCK=16, num_warps=1, num_stages=1) buf370 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf368, buf369, out=buf370) buf384 = reinterpret_tensor(buf369, (4, 4, 1), (4, 1, 16), 0) del buf369 triton_poi_fused_bmm_8[grid(16)](buf334, buf384, 16, XBLOCK=16, num_warps=1, num_stages=1) buf385 = reinterpret_tensor(buf368, (4, 1, 4), (4, 16, 1), 0) del buf368 triton_poi_fused_bmm_9[grid(16)](buf335, buf385, 16, XBLOCK=16, num_warps=1, num_stages=1) buf386 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf384, buf385, out=buf386) buf342 = reinterpret_tensor(buf385, (4, 4, 1), (4, 1, 16), 0) del buf385 buf343 = buf384 del buf384 buf358 = reinterpret_tensor(buf291, (4, 4, 1), (4, 1, 16), 0) del buf291 buf359 = reinterpret_tensor(buf275, (4, 4, 1), (4, 1, 16), 0) del buf275 buf374 = buf323 del buf323 buf375 = reinterpret_tensor(buf289, (4, 4, 1), (4, 1, 16), 0) del buf289 buf390 = reinterpret_tensor(buf273, (4, 4, 1), (4, 1, 16), 0) del buf273 buf391 = buf320 del buf320 triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_8, buf338, buf354, buf370, buf386, buf342, buf343, buf358, buf359, buf374, buf375, buf390, buf391, 16, XBLOCK=16, num_warps=1, num_stages=1) buf344 = buf338 del buf338 buf360 = buf354 del buf354 buf376 = buf370 del buf370 buf392 = buf386 del buf386 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf344, buf360, buf376, buf392, primals_8, buf342, buf343, buf358, buf359, buf374, buf375, buf390, buf391, 64, XBLOCK=64, num_warps=1, num_stages=1) buf355 = buf391 del buf391 triton_poi_fused_bmm_4[grid(16)](buf335, buf355, 16, XBLOCK=16, num_warps=1, num_stages=1) buf356 = reinterpret_tensor(buf390, (4, 1, 4), (4, 16, 1), 0) del buf390 triton_poi_fused_bmm_5[grid(16)](buf334, buf356, 16, XBLOCK=16, num_warps=1, num_stages=1) buf357 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf355, buf356, out=buf357) buf371 = reinterpret_tensor(buf356, (4, 4, 1), (4, 1, 16), 0) del buf356 triton_poi_fused_bmm_6[grid(16)](buf335, 
buf371, 16, XBLOCK=16, num_warps=1, num_stages=1) buf372 = reinterpret_tensor(buf355, (4, 1, 4), (4, 16, 1), 0) del buf355 triton_poi_fused_bmm_7[grid(16)](buf334, buf372, 16, XBLOCK=16, num_warps=1, num_stages=1) buf373 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf371, buf372, out=buf373) buf345 = reinterpret_tensor(buf372, (4, 4, 1), (4, 1, 16), 0) del buf372 buf346 = buf371 del buf371 buf361 = buf375 del buf375 buf362 = buf374 del buf374 buf377 = buf359 del buf359 buf378 = buf358 del buf358 triton_poi_fused__softmax_eq_masked_fill_12[grid(16)](primals_7, buf341, buf357, buf373, buf345, buf346, buf361, buf362, buf377, buf378, 16, XBLOCK=16, num_warps=1, num_stages=1) buf387 = buf343 del buf343 triton_poi_fused_bmm_8[grid(16)](buf335, buf387, 16, XBLOCK=16, num_warps=1, num_stages=1) buf388 = reinterpret_tensor(buf342, (4, 1, 4), (4, 16, 1), 0) del buf342 triton_poi_fused_bmm_9[grid(16)](buf334, buf388, 16, XBLOCK=16, num_warps=1, num_stages=1) buf389 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf387, buf388, out=buf389) buf348 = reinterpret_tensor(buf388, (4, 4, 1), (4, 1, 16), 0) del buf388 triton_poi_fused_bmm_13[grid(16)](buf335, buf348, 16, XBLOCK=16, num_warps=1, num_stages=1) buf349 = reinterpret_tensor(buf387, (4, 4, 1), (4, 1, 1), 0) del buf387 extern_kernels.bmm(buf344, buf348, out=buf349) buf364 = buf348 del buf348 triton_poi_fused_bmm_14[grid(16)](buf335, buf364, 16, XBLOCK=16, num_warps=1, num_stages=1) buf365 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf360, buf364, out=buf365) buf380 = buf364 del buf364 triton_poi_fused_bmm_15[grid(16)](buf335, buf380, 16, XBLOCK=16, num_warps=1, num_stages=1) buf381 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf376, buf380, out=buf381) buf396 = buf380 del buf380 triton_poi_fused_bmm_16[grid(16)](buf335, buf396, 16, XBLOCK=16, num_warps=1, num_stages=1) buf397 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf392, buf396, out=buf397) buf403 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf398 = reinterpret_tensor(buf403, (4, 4, 4), (32, 8, 1), 4) triton_poi_fused_cat_17[grid(64)](buf349, buf365, buf381, buf397, buf398, 64, XBLOCK=64, num_warps=1, num_stages=1) buf402 = reinterpret_tensor(buf403, (4, 4, 4), (32, 8, 1), 0) triton_poi_fused_cat_18[grid(64)](buf327, buf402, 64, XBLOCK=64, num_warps=1, num_stages=1) buf406 = buf327 del buf327 extern_kernels.mm(reinterpret_tensor(buf403, (16, 8), (8, 1), 0), reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), out=buf406) buf393 = reinterpret_tensor(buf397, (4, 4, 1), (4, 1, 16), 0) del buf397 buf394 = reinterpret_tensor(buf381, (4, 4, 1), (4, 1, 16), 0) del buf381 buf408 = reinterpret_tensor(buf365, (4, 4), (4, 1), 0) del buf365 buf410 = buf408 del buf408 triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19[grid(16)]( buf410, primals_7, buf389, buf406, primals_14, buf393, buf394, 16, XBLOCK=16, num_warps=1, num_stages=1) buf347 = buf341 del buf341 buf363 = buf357 del buf357 buf379 = buf373 del buf373 buf395 = buf389 del buf389 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf347, buf363, buf379, buf395, primals_7, buf345, buf346, buf361, buf362, buf377, buf378, buf393, buf394, 64, XBLOCK=64, num_warps=1, num_stages=1) buf350 = buf394 del buf394 triton_poi_fused_bmm_13[grid(16)](buf334, buf350, 16, XBLOCK=16, num_warps=1, num_stages=1) buf351 = reinterpret_tensor(buf393, (4, 4, 1), (4, 1, 1), 0) del 
buf393 extern_kernels.bmm(buf347, buf350, out=buf351) buf366 = buf350 del buf350 triton_poi_fused_bmm_14[grid(16)](buf334, buf366, 16, XBLOCK=16, num_warps=1, num_stages=1) buf367 = reinterpret_tensor(buf378, (4, 4, 1), (4, 1, 1), 0) del buf378 extern_kernels.bmm(buf363, buf366, out=buf367) buf382 = buf366 del buf366 triton_poi_fused_bmm_15[grid(16)](buf334, buf382, 16, XBLOCK=16, num_warps=1, num_stages=1) buf383 = reinterpret_tensor(buf377, (4, 4, 1), (4, 1, 1), 0) del buf377 extern_kernels.bmm(buf379, buf382, out=buf383) buf399 = buf382 del buf382 triton_poi_fused_bmm_16[grid(16)](buf334, buf399, 16, XBLOCK=16, num_warps=1, num_stages=1) buf400 = reinterpret_tensor(buf362, (4, 4, 1), (4, 1, 1), 0) del buf362 extern_kernels.bmm(buf395, buf399, out=buf400) buf405 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf401 = reinterpret_tensor(buf405, (4, 4, 4), (32, 8, 1), 4) triton_poi_fused_cat_17[grid(64)](buf351, buf367, buf383, buf400, buf401, 64, XBLOCK=64, num_warps=1, num_stages=1) buf404 = reinterpret_tensor(buf405, (4, 4, 4), (32, 8, 1), 0) triton_poi_fused_cat_18[grid(64)](buf329, buf404, 64, XBLOCK=64, num_warps=1, num_stages=1) buf407 = buf329 del buf329 extern_kernels.mm(reinterpret_tensor(buf405, (16, 8), (8, 1), 0), reinterpret_tensor(primals_15, (8, 4), (1, 8), 0), out=buf407) buf409 = reinterpret_tensor(buf400, (4, 4), (4, 1), 0) del buf400 buf412 = buf409 del buf409 triton_poi_fused_div_mul_relu_sum_20[grid(16)](buf412, buf407, primals_16, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) buf411 = reinterpret_tensor(buf383, (4, 4), (4, 1), 0) del buf383 extern_kernels.addmm(primals_18, buf410, reinterpret_tensor( primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf411) buf413 = reinterpret_tensor(buf367, (4, 4), (4, 1), 0) del buf367 extern_kernels.addmm(primals_20, buf412, reinterpret_tensor( primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf413) buf414 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf663 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_21[grid(64)](buf406, primals_14, buf414, buf663, 64, XBLOCK=64, num_warps=1, num_stages=1) buf415 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf414, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 12), (1, 4), 0), out=buf415) buf416 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf662 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_21[grid(64)](buf407, primals_16, buf416, buf662, 64, XBLOCK=64, num_warps=1, num_stages=1) buf417 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf416, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf417) buf418 = reinterpret_tensor(buf415, (4, 4, 12), (48, 12, 1), 0) del buf415 triton_poi_fused_mul_1[grid(192)](buf418, primals_22, primals_7, 192, XBLOCK=256, num_warps=4, num_stages=1) buf419 = reinterpret_tensor(buf417, (4, 4, 12), (48, 12, 1), 0) del buf417 triton_poi_fused_mul_1[grid(192)](buf419, primals_24, primals_8, 192, XBLOCK=256, num_warps=4, num_stages=1) buf420 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf421 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_22[grid(64)](buf413, buf418, buf420, buf421, 64, XBLOCK=64, num_warps=1, num_stages=1) buf422 = reinterpret_tensor(buf351, (4, 4, 1), (4, 1, 16), 0) del buf351 triton_poi_fused_bmm_23[grid(16)](buf420, 
buf422, 16, XBLOCK=16, num_warps=1, num_stages=1) buf423 = reinterpret_tensor(buf399, (4, 1, 4), (4, 16, 1), 0) del buf399 triton_poi_fused_bmm_23[grid(16)](buf421, buf423, 16, XBLOCK=16, num_warps=1, num_stages=1) buf424 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf422, buf423, out=buf424) buf425 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf426 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_22[grid(64)](buf411, buf419, buf425, buf426, 64, XBLOCK=64, num_warps=1, num_stages=1) buf427 = reinterpret_tensor(buf423, (4, 4, 1), (4, 1, 16), 0) del buf423 triton_poi_fused_bmm_23[grid(16)](buf425, buf427, 16, XBLOCK=16, num_warps=1, num_stages=1) buf428 = reinterpret_tensor(buf422, (4, 1, 4), (4, 16, 1), 0) del buf422 triton_poi_fused_bmm_23[grid(16)](buf426, buf428, 16, XBLOCK=16, num_warps=1, num_stages=1) buf429 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf427, buf428, out=buf429) buf440 = reinterpret_tensor(buf428, (4, 4, 1), (4, 1, 16), 0) del buf428 triton_poi_fused_bmm_24[grid(16)](buf420, buf440, 16, XBLOCK=16, num_warps=1, num_stages=1) buf441 = reinterpret_tensor(buf427, (4, 1, 4), (4, 16, 1), 0) del buf427 triton_poi_fused_bmm_24[grid(16)](buf421, buf441, 16, XBLOCK=16, num_warps=1, num_stages=1) buf442 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf440, buf441, out=buf442) buf456 = reinterpret_tensor(buf441, (4, 4, 1), (4, 1, 16), 0) del buf441 triton_poi_fused_bmm_25[grid(16)](buf420, buf456, 16, XBLOCK=16, num_warps=1, num_stages=1) buf457 = reinterpret_tensor(buf440, (4, 1, 4), (4, 16, 1), 0) del buf440 triton_poi_fused_bmm_25[grid(16)](buf421, buf457, 16, XBLOCK=16, num_warps=1, num_stages=1) buf458 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf456, buf457, out=buf458) buf472 = reinterpret_tensor(buf457, (4, 4, 1), (4, 1, 16), 0) del buf457 triton_poi_fused_bmm_26[grid(16)](buf420, buf472, 16, XBLOCK=16, num_warps=1, num_stages=1) buf473 = reinterpret_tensor(buf456, (4, 1, 4), (4, 16, 1), 0) del buf456 triton_poi_fused_bmm_26[grid(16)](buf421, buf473, 16, XBLOCK=16, num_warps=1, num_stages=1) buf474 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf472, buf473, out=buf474) buf430 = reinterpret_tensor(buf473, (4, 4, 1), (4, 1, 16), 0) del buf473 buf431 = buf472 del buf472 buf446 = buf361 del buf361 buf447 = buf346 del buf346 buf462 = buf345 del buf345 buf463 = reinterpret_tensor(buf349, (4, 4, 1), (4, 1, 16), 0) del buf349 buf478 = buf396 del buf396 buf479 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_7, buf424, buf442, buf458, buf474, buf430, buf431, buf446, buf447, buf462, buf463, buf478, buf479, 16, XBLOCK=16, num_warps=1, num_stages=1) buf432 = buf424 del buf424 buf448 = buf442 del buf442 buf464 = buf458 del buf458 buf480 = buf474 del buf474 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf432, buf448, buf464, buf480, primals_7, buf430, buf431, buf446, buf447, buf462, buf463, buf478, buf479, 64, XBLOCK=64, num_warps=1, num_stages=1) buf443 = buf479 del buf479 triton_poi_fused_bmm_24[grid(16)](buf425, buf443, 16, XBLOCK=16, num_warps=1, num_stages=1) buf444 = reinterpret_tensor(buf478, (4, 1, 4), (4, 16, 1), 0) del buf478 triton_poi_fused_bmm_24[grid(16)](buf426, buf444, 16, XBLOCK=16, num_warps=1, num_stages=1) buf445 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
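        # Editor's note: per the fused-op names, each
        # _softmax_eq_masked_fill_10 launch computes row max and exp-sum over
        # the four head scores, with positions selected by aten.eq on the
        # (4, 4) primals_7/primals_8 tensor masked_fill'ed first, and the
        # matching _11 launch then normalizes the scores in place.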
extern_kernels.bmm(buf443, buf444, out=buf445) buf459 = reinterpret_tensor(buf444, (4, 4, 1), (4, 1, 16), 0) del buf444 triton_poi_fused_bmm_25[grid(16)](buf425, buf459, 16, XBLOCK=16, num_warps=1, num_stages=1) buf460 = reinterpret_tensor(buf443, (4, 1, 4), (4, 16, 1), 0) del buf443 triton_poi_fused_bmm_25[grid(16)](buf426, buf460, 16, XBLOCK=16, num_warps=1, num_stages=1) buf461 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf459, buf460, out=buf461) buf475 = reinterpret_tensor(buf460, (4, 4, 1), (4, 1, 16), 0) del buf460 triton_poi_fused_bmm_26[grid(16)](buf425, buf475, 16, XBLOCK=16, num_warps=1, num_stages=1) buf476 = reinterpret_tensor(buf459, (4, 1, 4), (4, 16, 1), 0) del buf459 triton_poi_fused_bmm_26[grid(16)](buf426, buf476, 16, XBLOCK=16, num_warps=1, num_stages=1) buf477 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf475, buf476, out=buf477) buf433 = reinterpret_tensor(buf476, (4, 4, 1), (4, 1, 16), 0) del buf476 buf434 = buf475 del buf475 buf449 = buf463 del buf463 buf450 = buf462 del buf462 buf465 = buf447 del buf447 buf466 = buf446 del buf446 buf481 = buf431 del buf431 buf482 = buf430 del buf430 triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_8, buf429, buf445, buf461, buf477, buf433, buf434, buf449, buf450, buf465, buf466, buf481, buf482, 16, XBLOCK=16, num_warps=1, num_stages=1) buf435 = buf429 del buf429 buf451 = buf445 del buf445 buf467 = buf461 del buf461 buf483 = buf477 del buf477 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf435, buf451, buf467, buf483, primals_8, buf433, buf434, buf449, buf450, buf465, buf466, buf481, buf482, 64, XBLOCK=64, num_warps=1, num_stages=1) buf436 = buf482 del buf482 triton_poi_fused_bmm_13[grid(16)](buf418, buf436, 16, XBLOCK=16, num_warps=1, num_stages=1) buf437 = reinterpret_tensor(buf481, (4, 4, 1), (4, 1, 1), 0) del buf481 extern_kernels.bmm(buf432, buf436, out=buf437) buf438 = buf436 del buf436 triton_poi_fused_bmm_13[grid(16)](buf419, buf438, 16, XBLOCK=16, num_warps=1, num_stages=1) buf439 = reinterpret_tensor(buf466, (4, 4, 1), (4, 1, 1), 0) del buf466 extern_kernels.bmm(buf435, buf438, out=buf439) buf452 = buf438 del buf438 triton_poi_fused_bmm_14[grid(16)](buf418, buf452, 16, XBLOCK=16, num_warps=1, num_stages=1) buf453 = reinterpret_tensor(buf465, (4, 4, 1), (4, 1, 1), 0) del buf465 extern_kernels.bmm(buf448, buf452, out=buf453) buf454 = buf452 del buf452 triton_poi_fused_bmm_14[grid(16)](buf419, buf454, 16, XBLOCK=16, num_warps=1, num_stages=1) buf455 = reinterpret_tensor(buf450, (4, 4, 1), (4, 1, 1), 0) del buf450 extern_kernels.bmm(buf451, buf454, out=buf455) buf468 = buf454 del buf454 triton_poi_fused_bmm_15[grid(16)](buf418, buf468, 16, XBLOCK=16, num_warps=1, num_stages=1) buf469 = reinterpret_tensor(buf449, (4, 4, 1), (4, 1, 1), 0) del buf449 extern_kernels.bmm(buf464, buf468, out=buf469) buf470 = buf468 del buf468 triton_poi_fused_bmm_15[grid(16)](buf419, buf470, 16, XBLOCK=16, num_warps=1, num_stages=1) buf471 = reinterpret_tensor(buf434, (4, 4, 1), (4, 1, 1), 0) del buf434 extern_kernels.bmm(buf467, buf470, out=buf471) buf484 = buf470 del buf470 triton_poi_fused_bmm_16[grid(16)](buf418, buf484, 16, XBLOCK=16, num_warps=1, num_stages=1) buf485 = reinterpret_tensor(buf433, (4, 4, 1), (4, 1, 1), 0) del buf433 extern_kernels.bmm(buf480, buf484, out=buf485) buf490 = reinterpret_tensor(buf406, (4, 4, 4), (16, 4, 1), 0) del buf406 triton_poi_fused_add_cat_28[grid(64)](buf490, buf437, buf453, buf469, buf485, primals_14, 64, XBLOCK=64, 
num_warps=1, num_stages=1) buf487 = reinterpret_tensor(buf485, (4, 4, 1), (4, 1, 16), 0) del buf485 triton_poi_fused_bmm_16[grid(16)](buf419, buf487, 16, XBLOCK=16, num_warps=1, num_stages=1) buf488 = buf469 del buf469 extern_kernels.bmm(buf483, buf487, out=buf488) buf492 = reinterpret_tensor(buf407, (4, 4, 4), (16, 4, 1), 0) del buf407 triton_poi_fused_add_cat_28[grid(64)](buf492, buf439, buf455, buf471, buf488, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf491 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_26, reinterpret_tensor(buf490, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf491) buf493 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_28, reinterpret_tensor(buf492, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf493) buf494 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf661 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf491, buf494, buf661, 64, XBLOCK=64, num_warps=1, num_stages=1) buf495 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf494, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 12), (1, 4), 0), out=buf495) buf496 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf660 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf493, buf496, buf660, 64, XBLOCK=64, num_warps=1, num_stages=1) buf497 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf496, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 12), (1, 4), 0), out=buf497) buf498 = reinterpret_tensor(buf495, (4, 4, 12), (48, 12, 1), 0) del buf495 triton_poi_fused_mul_1[grid(192)](buf498, primals_10, primals_7, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_10 buf499 = reinterpret_tensor(buf497, (4, 4, 12), (48, 12, 1), 0) del buf497 triton_poi_fused_mul_1[grid(192)](buf499, primals_12, primals_8, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_12 buf500 = reinterpret_tensor(buf488, (4, 4, 1), (4, 1, 16), 0) del buf488 triton_poi_fused_bmm_2[grid(16)](buf498, buf500, 16, XBLOCK=16, num_warps=1, num_stages=1) buf501 = reinterpret_tensor(buf471, (4, 1, 4), (4, 16, 1), 0) del buf471 triton_poi_fused_bmm_3[grid(16)](buf499, buf501, 16, XBLOCK=16, num_warps=1, num_stages=1) buf502 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf500, buf501, out=buf502) buf503 = reinterpret_tensor(buf501, (4, 4, 1), (4, 1, 16), 0) del buf501 triton_poi_fused_bmm_2[grid(16)](buf499, buf503, 16, XBLOCK=16, num_warps=1, num_stages=1) buf504 = reinterpret_tensor(buf500, (4, 1, 4), (4, 16, 1), 0) del buf500 triton_poi_fused_bmm_3[grid(16)](buf498, buf504, 16, XBLOCK=16, num_warps=1, num_stages=1) buf505 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf503, buf504, out=buf505) buf516 = reinterpret_tensor(buf504, (4, 4, 1), (4, 1, 16), 0) del buf504 triton_poi_fused_bmm_4[grid(16)](buf498, buf516, 16, XBLOCK=16, num_warps=1, num_stages=1) buf517 = reinterpret_tensor(buf503, (4, 1, 4), (4, 16, 1), 0) del buf503 triton_poi_fused_bmm_5[grid(16)](buf499, buf517, 16, XBLOCK=16, num_warps=1, num_stages=1) buf518 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf516, buf517, out=buf518) buf532 = reinterpret_tensor(buf517, (4, 
4, 1), (4, 1, 16), 0) del buf517 triton_poi_fused_bmm_6[grid(16)](buf498, buf532, 16, XBLOCK=16, num_warps=1, num_stages=1) buf533 = reinterpret_tensor(buf516, (4, 1, 4), (4, 16, 1), 0) del buf516 triton_poi_fused_bmm_7[grid(16)](buf499, buf533, 16, XBLOCK=16, num_warps=1, num_stages=1) buf534 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf532, buf533, out=buf534) buf548 = reinterpret_tensor(buf533, (4, 4, 1), (4, 1, 16), 0) del buf533 triton_poi_fused_bmm_8[grid(16)](buf498, buf548, 16, XBLOCK=16, num_warps=1, num_stages=1) buf549 = reinterpret_tensor(buf532, (4, 1, 4), (4, 16, 1), 0) del buf532 triton_poi_fused_bmm_9[grid(16)](buf499, buf549, 16, XBLOCK=16, num_warps=1, num_stages=1) buf550 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf548, buf549, out=buf550) buf506 = reinterpret_tensor(buf549, (4, 4, 1), (4, 1, 16), 0) del buf549 buf507 = buf548 del buf548 buf522 = reinterpret_tensor(buf455, (4, 4, 1), (4, 1, 16), 0) del buf455 buf523 = reinterpret_tensor(buf439, (4, 4, 1), (4, 1, 16), 0) del buf439 buf538 = buf487 del buf487 buf539 = reinterpret_tensor(buf453, (4, 4, 1), (4, 1, 16), 0) del buf453 buf554 = reinterpret_tensor(buf437, (4, 4, 1), (4, 1, 16), 0) del buf437 buf555 = buf484 del buf484 triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_8, buf502, buf518, buf534, buf550, buf506, buf507, buf522, buf523, buf538, buf539, buf554, buf555, 16, XBLOCK=16, num_warps=1, num_stages=1) buf508 = buf502 del buf502 buf524 = buf518 del buf518 buf540 = buf534 del buf534 buf556 = buf550 del buf550 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf508, buf524, buf540, buf556, primals_8, buf506, buf507, buf522, buf523, buf538, buf539, buf554, buf555, 64, XBLOCK=64, num_warps=1, num_stages=1) buf519 = buf555 del buf555 triton_poi_fused_bmm_4[grid(16)](buf499, buf519, 16, XBLOCK=16, num_warps=1, num_stages=1) buf520 = reinterpret_tensor(buf554, (4, 1, 4), (4, 16, 1), 0) del buf554 triton_poi_fused_bmm_5[grid(16)](buf498, buf520, 16, XBLOCK=16, num_warps=1, num_stages=1) buf521 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf519, buf520, out=buf521) buf535 = reinterpret_tensor(buf520, (4, 4, 1), (4, 1, 16), 0) del buf520 triton_poi_fused_bmm_6[grid(16)](buf499, buf535, 16, XBLOCK=16, num_warps=1, num_stages=1) buf536 = reinterpret_tensor(buf519, (4, 1, 4), (4, 16, 1), 0) del buf519 triton_poi_fused_bmm_7[grid(16)](buf498, buf536, 16, XBLOCK=16, num_warps=1, num_stages=1) buf537 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf535, buf536, out=buf537) buf509 = reinterpret_tensor(buf536, (4, 4, 1), (4, 1, 16), 0) del buf536 buf510 = buf535 del buf535 buf525 = buf539 del buf539 buf526 = buf538 del buf538 buf541 = buf523 del buf523 buf542 = buf522 del buf522 triton_poi_fused__softmax_eq_masked_fill_12[grid(16)](primals_7, buf505, buf521, buf537, buf509, buf510, buf525, buf526, buf541, buf542, 16, XBLOCK=16, num_warps=1, num_stages=1) buf551 = buf507 del buf507 triton_poi_fused_bmm_8[grid(16)](buf499, buf551, 16, XBLOCK=16, num_warps=1, num_stages=1) buf552 = reinterpret_tensor(buf506, (4, 1, 4), (4, 16, 1), 0) del buf506 triton_poi_fused_bmm_9[grid(16)](buf498, buf552, 16, XBLOCK=16, num_warps=1, num_stages=1) buf553 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf551, buf552, out=buf553) buf512 = reinterpret_tensor(buf552, (4, 4, 1), (4, 1, 16), 0) del buf552 triton_poi_fused_bmm_13[grid(16)](buf499, buf512, 16, 
XBLOCK=16, num_warps=1, num_stages=1) buf513 = reinterpret_tensor(buf551, (4, 4, 1), (4, 1, 1), 0) del buf551 extern_kernels.bmm(buf508, buf512, out=buf513) buf528 = buf512 del buf512 triton_poi_fused_bmm_14[grid(16)](buf499, buf528, 16, XBLOCK=16, num_warps=1, num_stages=1) buf529 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf524, buf528, out=buf529) buf544 = buf528 del buf528 triton_poi_fused_bmm_15[grid(16)](buf499, buf544, 16, XBLOCK=16, num_warps=1, num_stages=1) buf545 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf540, buf544, out=buf545) buf560 = buf544 del buf544 triton_poi_fused_bmm_16[grid(16)](buf499, buf560, 16, XBLOCK=16, num_warps=1, num_stages=1) buf561 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf556, buf560, out=buf561) buf567 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf562 = reinterpret_tensor(buf567, (4, 4, 4), (32, 8, 1), 4) triton_poi_fused_cat_17[grid(64)](buf513, buf529, buf545, buf561, buf562, 64, XBLOCK=64, num_warps=1, num_stages=1) buf566 = reinterpret_tensor(buf567, (4, 4, 4), (32, 8, 1), 0) triton_poi_fused_cat_18[grid(64)](buf491, buf566, 64, XBLOCK=64, num_warps=1, num_stages=1) buf570 = buf491 del buf491 extern_kernels.mm(reinterpret_tensor(buf567, (16, 8), (8, 1), 0), reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), out=buf570) buf557 = reinterpret_tensor(buf561, (4, 4, 1), (4, 1, 16), 0) del buf561 buf558 = reinterpret_tensor(buf545, (4, 4, 1), (4, 1, 16), 0) del buf545 buf572 = reinterpret_tensor(buf529, (4, 4), (4, 1), 0) del buf529 buf574 = buf572 del buf572 triton_poi_fused__softmax_div_eq_masked_fill_mul_relu_sum_19[grid(16)]( buf574, primals_7, buf553, buf570, primals_14, buf557, buf558, 16, XBLOCK=16, num_warps=1, num_stages=1) buf511 = buf505 del buf505 buf527 = buf521 del buf521 buf543 = buf537 del buf537 buf559 = buf553 del buf553 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf511, buf527, buf543, buf559, primals_7, buf509, buf510, buf525, buf526, buf541, buf542, buf557, buf558, 64, XBLOCK=64, num_warps=1, num_stages=1) buf514 = buf558 del buf558 triton_poi_fused_bmm_13[grid(16)](buf498, buf514, 16, XBLOCK=16, num_warps=1, num_stages=1) buf515 = reinterpret_tensor(buf557, (4, 4, 1), (4, 1, 1), 0) del buf557 extern_kernels.bmm(buf511, buf514, out=buf515) buf530 = buf514 del buf514 triton_poi_fused_bmm_14[grid(16)](buf498, buf530, 16, XBLOCK=16, num_warps=1, num_stages=1) buf531 = reinterpret_tensor(buf542, (4, 4, 1), (4, 1, 1), 0) del buf542 extern_kernels.bmm(buf527, buf530, out=buf531) buf546 = buf530 del buf530 triton_poi_fused_bmm_15[grid(16)](buf498, buf546, 16, XBLOCK=16, num_warps=1, num_stages=1) buf547 = reinterpret_tensor(buf541, (4, 4, 1), (4, 1, 1), 0) del buf541 extern_kernels.bmm(buf543, buf546, out=buf547) buf563 = buf546 del buf546 triton_poi_fused_bmm_16[grid(16)](buf498, buf563, 16, XBLOCK=16, num_warps=1, num_stages=1) buf564 = reinterpret_tensor(buf526, (4, 4, 1), (4, 1, 1), 0) del buf526 extern_kernels.bmm(buf559, buf563, out=buf564) buf569 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf565 = reinterpret_tensor(buf569, (4, 4, 4), (32, 8, 1), 4) triton_poi_fused_cat_17[grid(64)](buf515, buf531, buf547, buf564, buf565, 64, XBLOCK=64, num_warps=1, num_stages=1) buf568 = reinterpret_tensor(buf569, (4, 4, 4), (32, 8, 1), 0) triton_poi_fused_cat_18[grid(64)](buf493, buf568, 64, XBLOCK=64, num_warps=1, num_stages=1) buf571 = buf493 del buf493 extern_kernels.mm(reinterpret_tensor(buf569, 
(16, 8), (8, 1), 0), reinterpret_tensor(primals_15, (8, 4), (1, 8), 0), out=buf571) buf573 = reinterpret_tensor(buf564, (4, 4), (4, 1), 0) del buf564 buf576 = buf573 del buf573 triton_poi_fused_div_mul_relu_sum_20[grid(16)](buf576, buf571, primals_16, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) buf575 = reinterpret_tensor(buf547, (4, 4), (4, 1), 0) del buf547 extern_kernels.addmm(primals_18, buf574, reinterpret_tensor( primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf575) del primals_18 buf577 = reinterpret_tensor(buf531, (4, 4), (4, 1), 0) del buf531 extern_kernels.addmm(primals_20, buf576, reinterpret_tensor( primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf577) del primals_20 buf578 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf659 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_21[grid(64)](buf570, primals_14, buf578, buf659, 64, XBLOCK=64, num_warps=1, num_stages=1) buf579 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf578, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 12), (1, 4), 0), out=buf579) buf580 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf658 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_21[grid(64)](buf571, primals_16, buf580, buf658, 64, XBLOCK=64, num_warps=1, num_stages=1) buf581 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf580, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 12), (1, 4), 0), out=buf581) buf582 = reinterpret_tensor(buf579, (4, 4, 12), (48, 12, 1), 0) del buf579 triton_poi_fused_mul_1[grid(192)](buf582, primals_22, primals_7, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_22 buf583 = reinterpret_tensor(buf581, (4, 4, 12), (48, 12, 1), 0) del buf581 triton_poi_fused_mul_1[grid(192)](buf583, primals_24, primals_8, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_24 buf584 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf585 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_22[grid(64)](buf577, buf582, buf584, buf585, 64, XBLOCK=64, num_warps=1, num_stages=1) buf586 = reinterpret_tensor(buf515, (4, 4, 1), (4, 1, 16), 0) del buf515 triton_poi_fused_bmm_23[grid(16)](buf584, buf586, 16, XBLOCK=16, num_warps=1, num_stages=1) buf587 = reinterpret_tensor(buf563, (4, 1, 4), (4, 16, 1), 0) del buf563 triton_poi_fused_bmm_23[grid(16)](buf585, buf587, 16, XBLOCK=16, num_warps=1, num_stages=1) buf588 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf586, buf587, out=buf588) buf589 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf590 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_22[grid(64)](buf575, buf583, buf589, buf590, 64, XBLOCK=64, num_warps=1, num_stages=1) buf591 = reinterpret_tensor(buf587, (4, 4, 1), (4, 1, 16), 0) del buf587 triton_poi_fused_bmm_23[grid(16)](buf589, buf591, 16, XBLOCK=16, num_warps=1, num_stages=1) buf592 = reinterpret_tensor(buf586, (4, 1, 4), (4, 16, 1), 0) del buf586 triton_poi_fused_bmm_23[grid(16)](buf590, buf592, 16, XBLOCK=16, num_warps=1, num_stages=1) buf593 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf591, buf592, out=buf593) buf604 = reinterpret_tensor(buf592, (4, 4, 1), (4, 1, 16), 0) del buf592 triton_poi_fused_bmm_24[grid(16)](buf584, buf604, 16, XBLOCK=16, num_warps=1, num_stages=1) 
buf605 = reinterpret_tensor(buf591, (4, 1, 4), (4, 16, 1), 0) del buf591 triton_poi_fused_bmm_24[grid(16)](buf585, buf605, 16, XBLOCK=16, num_warps=1, num_stages=1) buf606 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf604, buf605, out=buf606) buf620 = reinterpret_tensor(buf605, (4, 4, 1), (4, 1, 16), 0) del buf605 triton_poi_fused_bmm_25[grid(16)](buf584, buf620, 16, XBLOCK=16, num_warps=1, num_stages=1) buf621 = reinterpret_tensor(buf604, (4, 1, 4), (4, 16, 1), 0) del buf604 triton_poi_fused_bmm_25[grid(16)](buf585, buf621, 16, XBLOCK=16, num_warps=1, num_stages=1) buf622 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf620, buf621, out=buf622) buf636 = reinterpret_tensor(buf621, (4, 4, 1), (4, 1, 16), 0) del buf621 triton_poi_fused_bmm_26[grid(16)](buf584, buf636, 16, XBLOCK=16, num_warps=1, num_stages=1) buf637 = reinterpret_tensor(buf620, (4, 1, 4), (4, 16, 1), 0) del buf620 triton_poi_fused_bmm_26[grid(16)](buf585, buf637, 16, XBLOCK=16, num_warps=1, num_stages=1) buf638 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf636, buf637, out=buf638) buf594 = reinterpret_tensor(buf637, (4, 4, 1), (4, 1, 16), 0) del buf637 buf595 = buf636 del buf636 buf610 = buf525 del buf525 buf611 = buf510 del buf510 buf626 = buf509 del buf509 buf627 = reinterpret_tensor(buf513, (4, 4, 1), (4, 1, 16), 0) del buf513 buf642 = buf560 del buf560 buf643 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_7, buf588, buf606, buf622, buf638, buf594, buf595, buf610, buf611, buf626, buf627, buf642, buf643, 16, XBLOCK=16, num_warps=1, num_stages=1) buf596 = buf588 del buf588 buf612 = buf606 del buf606 buf628 = buf622 del buf622 buf644 = buf638 del buf638 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf596, buf612, buf628, buf644, primals_7, buf594, buf595, buf610, buf611, buf626, buf627, buf642, buf643, 64, XBLOCK=64, num_warps=1, num_stages=1) buf607 = buf643 del buf643 triton_poi_fused_bmm_24[grid(16)](buf589, buf607, 16, XBLOCK=16, num_warps=1, num_stages=1) buf608 = reinterpret_tensor(buf642, (4, 1, 4), (4, 16, 1), 0) del buf642 triton_poi_fused_bmm_24[grid(16)](buf590, buf608, 16, XBLOCK=16, num_warps=1, num_stages=1) buf609 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf607, buf608, out=buf609) buf623 = reinterpret_tensor(buf608, (4, 4, 1), (4, 1, 16), 0) del buf608 triton_poi_fused_bmm_25[grid(16)](buf589, buf623, 16, XBLOCK=16, num_warps=1, num_stages=1) buf624 = reinterpret_tensor(buf607, (4, 1, 4), (4, 16, 1), 0) del buf607 triton_poi_fused_bmm_25[grid(16)](buf590, buf624, 16, XBLOCK=16, num_warps=1, num_stages=1) buf625 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf623, buf624, out=buf625) buf639 = reinterpret_tensor(buf624, (4, 4, 1), (4, 1, 16), 0) del buf624 triton_poi_fused_bmm_26[grid(16)](buf589, buf639, 16, XBLOCK=16, num_warps=1, num_stages=1) buf640 = reinterpret_tensor(buf623, (4, 1, 4), (4, 16, 1), 0) del buf623 triton_poi_fused_bmm_26[grid(16)](buf590, buf640, 16, XBLOCK=16, num_warps=1, num_stages=1) buf641 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf639, buf640, out=buf641) buf597 = reinterpret_tensor(buf640, (4, 4, 1), (4, 1, 16), 0) del buf640 buf598 = buf639 del buf639 buf613 = buf627 del buf627 buf614 = buf626 del buf626 buf629 = buf611 del buf611 buf630 = buf610 del buf610 buf645 = buf595 del buf595 buf646 = 
buf594 del buf594 triton_poi_fused__softmax_eq_masked_fill_10[grid(16)](primals_8, buf593, buf609, buf625, buf641, buf597, buf598, buf613, buf614, buf629, buf630, buf645, buf646, 16, XBLOCK=16, num_warps=1, num_stages=1) buf599 = buf593 del buf593 buf615 = buf609 del buf609 buf631 = buf625 del buf625 buf647 = buf641 del buf641 triton_poi_fused__softmax_eq_masked_fill_11[grid(64)](buf599, buf615, buf631, buf647, primals_8, buf597, buf598, buf613, buf614, buf629, buf630, buf645, buf646, 64, XBLOCK=64, num_warps=1, num_stages=1) buf600 = buf646 del buf646 triton_poi_fused_bmm_13[grid(16)](buf582, buf600, 16, XBLOCK=16, num_warps=1, num_stages=1) buf601 = reinterpret_tensor(buf645, (4, 4, 1), (4, 1, 1), 0) del buf645 extern_kernels.bmm(buf596, buf600, out=buf601) buf602 = buf600 del buf600 triton_poi_fused_bmm_13[grid(16)](buf583, buf602, 16, XBLOCK=16, num_warps=1, num_stages=1) buf603 = reinterpret_tensor(buf630, (4, 4, 1), (4, 1, 1), 0) del buf630 extern_kernels.bmm(buf599, buf602, out=buf603) buf616 = buf602 del buf602 triton_poi_fused_bmm_14[grid(16)](buf582, buf616, 16, XBLOCK=16, num_warps=1, num_stages=1) buf617 = reinterpret_tensor(buf629, (4, 4, 1), (4, 1, 1), 0) del buf629 extern_kernels.bmm(buf612, buf616, out=buf617) buf618 = buf616 del buf616 triton_poi_fused_bmm_14[grid(16)](buf583, buf618, 16, XBLOCK=16, num_warps=1, num_stages=1) buf619 = reinterpret_tensor(buf614, (4, 4, 1), (4, 1, 1), 0) del buf614 extern_kernels.bmm(buf615, buf618, out=buf619) buf632 = buf618 del buf618 triton_poi_fused_bmm_15[grid(16)](buf582, buf632, 16, XBLOCK=16, num_warps=1, num_stages=1) buf633 = reinterpret_tensor(buf613, (4, 4, 1), (4, 1, 1), 0) del buf613 extern_kernels.bmm(buf628, buf632, out=buf633) buf634 = buf632 del buf632 triton_poi_fused_bmm_15[grid(16)](buf583, buf634, 16, XBLOCK=16, num_warps=1, num_stages=1) buf635 = reinterpret_tensor(buf598, (4, 4, 1), (4, 1, 1), 0) del buf598 extern_kernels.bmm(buf631, buf634, out=buf635) buf648 = buf634 del buf634 triton_poi_fused_bmm_16[grid(16)](buf582, buf648, 16, XBLOCK=16, num_warps=1, num_stages=1) buf649 = reinterpret_tensor(buf597, (4, 4, 1), (4, 1, 1), 0) del buf597 extern_kernels.bmm(buf644, buf648, out=buf649) del buf648 buf654 = reinterpret_tensor(buf570, (4, 4, 4), (16, 4, 1), 0) del buf570 triton_poi_fused_add_cat_28[grid(64)](buf654, buf601, buf617, buf633, buf649, primals_14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf601 del buf617 del primals_14 buf651 = reinterpret_tensor(buf649, (4, 4, 1), (4, 1, 16), 0) del buf649 triton_poi_fused_bmm_16[grid(16)](buf583, buf651, 16, XBLOCK=16, num_warps=1, num_stages=1) buf652 = buf633 del buf633 extern_kernels.bmm(buf647, buf651, out=buf652) del buf651 buf656 = reinterpret_tensor(buf571, (4, 4, 4), (16, 4, 1), 0) del buf571 triton_poi_fused_add_cat_28[grid(64)](buf656, buf603, buf619, buf635, buf652, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf603 del buf619 del buf635 del buf652 del primals_16 buf655 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_26, reinterpret_tensor(buf654, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf655) del primals_26 buf657 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_28, reinterpret_tensor(buf656, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf657) del primals_28 return (reinterpret_tensor(buf655, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf657, (4, 4, 4), (16, 4, 1), 0), 
primals_7, primals_8, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor( buf4, (16, 4), (4, 1), 0), buf16, buf19, buf32, buf35, buf48, buf51, buf64, buf67, reinterpret_tensor(buf75, (16, 8), (8, 1), 0), reinterpret_tensor(buf77, (16, 8), (8, 1), 0), buf82, buf83, buf84, buf85, reinterpret_tensor(buf86, (16, 4), (4, 1), 0), reinterpret_tensor(buf88, (16, 4), (4, 1), 0), reinterpret_tensor( buf90, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf90, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf91, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf91, (4, 4, 4), (48, 12, 1), 4), buf104, buf107, buf120, buf123, buf136, buf139, buf152, buf155, reinterpret_tensor(buf162, (16, 4), (4, 1), 0), reinterpret_tensor( buf164, (16, 4), (4, 1), 0), reinterpret_tensor(buf166, (16, 4), (4, 1), 0), reinterpret_tensor(buf168, (16, 4), (4, 1), 0), buf180, buf183, buf196, buf199, buf212, buf215, buf228, buf231, reinterpret_tensor(buf239, (16, 8), (8, 1), 0), reinterpret_tensor( buf241, (16, 8), (8, 1), 0), buf246, buf247, buf248, buf249, reinterpret_tensor(buf250, (16, 4), (4, 1), 0), reinterpret_tensor( buf252, (16, 4), (4, 1), 0), reinterpret_tensor(buf254, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf254, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf255, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf255, (4, 4, 4), (48, 12, 1), 4), buf268, buf271, buf284, buf287, buf300, buf303, buf316, buf319, reinterpret_tensor(buf326, (16, 4), (4, 1), 0), reinterpret_tensor( buf328, (16, 4), (4, 1), 0), reinterpret_tensor(buf330, (16, 4), (4, 1), 0), reinterpret_tensor(buf332, (16, 4), (4, 1), 0), buf344, buf347, buf360, buf363, buf376, buf379, buf392, buf395, reinterpret_tensor(buf403, (16, 8), (8, 1), 0), reinterpret_tensor( buf405, (16, 8), (8, 1), 0), buf410, buf411, buf412, buf413, reinterpret_tensor(buf414, (16, 4), (4, 1), 0), reinterpret_tensor( buf416, (16, 4), (4, 1), 0), reinterpret_tensor(buf418, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf418, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf419, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf419, (4, 4, 4), (48, 12, 1), 4), buf432, buf435, buf448, buf451, buf464, buf467, buf480, buf483, reinterpret_tensor(buf490, (16, 4), (4, 1), 0), reinterpret_tensor( buf492, (16, 4), (4, 1), 0), reinterpret_tensor(buf494, (16, 4), (4, 1), 0), reinterpret_tensor(buf496, (16, 4), (4, 1), 0), buf508, buf511, buf524, buf527, buf540, buf543, buf556, buf559, reinterpret_tensor(buf567, (16, 8), (8, 1), 0), reinterpret_tensor( buf569, (16, 8), (8, 1), 0), buf574, buf575, buf576, buf577, reinterpret_tensor(buf578, (16, 4), (4, 1), 0), reinterpret_tensor( buf580, (16, 4), (4, 1), 0), reinterpret_tensor(buf582, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf582, (4, 4, 4), (48, 12, 1), 4), reinterpret_tensor(buf583, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf583, (4, 4, 4), (48, 12, 1), 4), buf596, buf599, buf612, buf615, buf628, buf631, buf644, buf647, reinterpret_tensor(buf654, (16, 4), (4, 1), 0), reinterpret_tensor( buf656, (16, 4), (4, 1), 0), primals_27, primals_25, reinterpret_tensor(buf583, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf582, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf589, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf590, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf584, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf585, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf583, (4, 1, 
4), (48, 1, 12), 10), reinterpret_tensor(buf582, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf589, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf590, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf584, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf585, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf583, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf582, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf589, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf590, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf584, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf585, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf583, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf582, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf589, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf590, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf584, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf585, (4, 4, 1), (16, 4, 1), 0), primals_23, buf658, primals_21, buf659, primals_19, primals_17, primals_15, primals_13, reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf498, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf499, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf498, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf499, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf498, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf499, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf499, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf498, (4, 4, 1), (48, 12, 1), 0), reinterpret_tensor(buf498, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf499, (4, 4, 1), (48, 12, 1), 0), primals_11, buf660, primals_9, buf661, reinterpret_tensor(buf419, (4, 1, 4), ( 48, 1, 12), 11), reinterpret_tensor(buf418, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf425, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf426, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf420, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf421, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf419, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf418, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf425, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf426, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf420, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf421, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf419, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf418, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf425, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf426, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf420, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf421, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf419, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf418, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf425, (4, 1, 4), (16, 1, 4), 0), 
reinterpret_tensor(buf426, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf420, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf421, (4, 4, 1), (16, 4, 1), 0), buf662, buf663, reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf334, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf335, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf334, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf335, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf334, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf335, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf335, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf334, (4, 4, 1), (48, 12, 1), 0), reinterpret_tensor(buf334, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf335, (4, 4, 1), (48, 12, 1), 0), buf664, buf665, reinterpret_tensor(buf255, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf254, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf262, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf256, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf257, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf255, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf254, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf262, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf256, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf257, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf255, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf254, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf262, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf256, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf257, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf255, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf254, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf261, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf262, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf256, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf257, (4, 4, 1), (16, 4, 1), 0), buf666, buf667, reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf170, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf171, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf170, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf171, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 
12), 9), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf170, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf171, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf171, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf170, (4, 4, 1), (48, 12, 1), 0), reinterpret_tensor(buf170, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf171, (4, 4, 1), (48, 12, 1), 0), buf668, buf669, reinterpret_tensor(buf91, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf90, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf97, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf98, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf92, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf93, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf91, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf90, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf97, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf98, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf92, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf93, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf91, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf90, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf97, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf98, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf92, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf93, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf91, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf90, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf97, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf98, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf92, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf93, (4, 4, 1), (16, 4, 1), 0), buf670, buf671, reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 11), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf6, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 7), reinterpret_tensor(buf7, (4, 4, 1), (48, 12, 1), 3), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 10), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf6, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 6), reinterpret_tensor(buf7, (4, 4, 1), (48, 12, 1), 2), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 9), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf6, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 5), reinterpret_tensor(buf7, (4, 4, 1), (48, 12, 1), 1), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 8), reinterpret_tensor(buf7, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf6, (4, 4, 1), (48, 12, 1), 0), reinterpret_tensor(buf6, (4, 1, 4), (48, 1, 12), 4), reinterpret_tensor(buf7, (4, 4, 1), (48, 12, 1), 0), buf672, buf673) class DyIntraModalityUpdate(nn.Module): """ Dynamic Intra-modality Attention Flow """ def __init__(self, v_size, q_size, output_size, num_head, drop=0.0): super(DyIntraModalityUpdate, self).__init__() self.v_size = v_size self.q_size = q_size self.output_size = output_size self.num_head = num_head self.v4q_gate_lin = 
nn.Linear(v_size, output_size) self.q4v_gate_lin = nn.Linear(q_size, output_size) self.v_lin = nn.Linear(v_size, output_size * 3) self.q_lin = nn.Linear(q_size, output_size * 3) self.v_output = nn.Linear(output_size, output_size) self.q_output = nn.Linear(output_size, output_size) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.drop = nn.Dropout(drop) def forward(self, v, q, v_mask, q_mask): """ v: visual feature [batch, num_obj, feat_size] q: question [batch, max_len, feat_size] v_mask [batch, num_obj] q_mask [batch, max_len] """ batch_size, num_obj = v_mask.shape _, max_len = q_mask.shape v_mean = (v * v_mask.unsqueeze(2)).sum(1) / v_mask.sum(1).unsqueeze(1) q_mean = (q * q_mask.unsqueeze(2)).sum(1) / q_mask.sum(1).unsqueeze(1) v4q_gate = self.sigmoid(self.v4q_gate_lin(self.drop(self.relu(v_mean))) ).unsqueeze(1) q4v_gate = self.sigmoid(self.q4v_gate_lin(self.drop(self.relu(q_mean))) ).unsqueeze(1) v_trans = self.v_lin(self.drop(self.relu(v))) q_trans = self.q_lin(self.drop(self.relu(q))) v_trans = v_trans * v_mask.unsqueeze(2) q_trans = q_trans * q_mask.unsqueeze(2) v_k, v_q, v_v = torch.split(v_trans, v_trans.size(2) // 3, dim=2) q_k, q_q, q_v = torch.split(q_trans, q_trans.size(2) // 3, dim=2) new_vq = (1 + q4v_gate) * v_q new_vk = (1 + q4v_gate) * v_k new_qq = (1 + v4q_gate) * q_q new_qk = (1 + v4q_gate) * q_k vk_set = torch.split(new_vk, new_vk.size(2) // self.num_head, dim=2) vq_set = torch.split(new_vq, new_vq.size(2) // self.num_head, dim=2) vv_set = torch.split(v_v, v_v.size(2) // self.num_head, dim=2) qk_set = torch.split(new_qk, new_qk.size(2) // self.num_head, dim=2) qq_set = torch.split(new_qq, new_qq.size(2) // self.num_head, dim=2) qv_set = torch.split(q_v, q_v.size(2) // self.num_head, dim=2) for i in range(self.num_head): vk_slice, vq_slice, vv_slice = vk_set[i], vq_set[i], vv_set[i] qk_slice, qq_slice, qv_slice = qk_set[i], qq_set[i], qv_set[i] v2v = (vq_slice @ vk_slice.transpose(1, 2)).masked_fill(v_mask. unsqueeze(1).expand([batch_size, num_obj, num_obj]) == 0, - 1000000000.0) / (self.output_size // self.num_head) ** 0.5 q2q = (qq_slice @ qk_slice.transpose(1, 2)).masked_fill(q_mask. 
unsqueeze(1).expand([batch_size, max_len, max_len]) == 0, - 1000000000.0) / (self.output_size // self.num_head) ** 0.5 dyIntraMAF_v2v = F.softmax(v2v, dim=2) dyIntraMAF_q2q = F.softmax(q2q, dim=2) v_update = dyIntraMAF_v2v @ vv_slice if i == 0 else torch.cat(( v_update, dyIntraMAF_v2v @ vv_slice), dim=2) q_update = dyIntraMAF_q2q @ qv_slice if i == 0 else torch.cat(( q_update, dyIntraMAF_q2q @ qv_slice), dim=2) updated_v = self.v_output(self.drop(v + v_update)) updated_q = self.q_output(self.drop(q + q_update)) return updated_v, updated_q class InterModalityUpdate(nn.Module): """ Inter-modality Attention Flow """ def __init__(self, v_size, q_size, output_size, num_head, drop=0.0): super(InterModalityUpdate, self).__init__() self.v_size = v_size self.q_size = q_size self.output_size = output_size self.num_head = num_head self.v_lin = nn.Linear(v_size, output_size * 3) self.q_lin = nn.Linear(q_size, output_size * 3) self.v_output = nn.Linear(output_size + v_size, output_size) self.q_output = nn.Linear(output_size + q_size, output_size) self.relu = nn.ReLU() self.drop = nn.Dropout(drop) def forward(self, v, q, v_mask, q_mask): """ v: visual feature [batch, num_obj, feat_size] q: question [batch, max_len, feat_size] v_mask [batch, num_obj] q_mask [batch, max_len] """ batch_size, num_obj = v_mask.shape _, max_len = q_mask.shape v_trans = self.v_lin(self.drop(self.relu(v))) q_trans = self.q_lin(self.drop(self.relu(q))) v_trans = v_trans * v_mask.unsqueeze(2) q_trans = q_trans * q_mask.unsqueeze(2) v_k, v_q, v_v = torch.split(v_trans, v_trans.size(2) // 3, dim=2) q_k, q_q, q_v = torch.split(q_trans, q_trans.size(2) // 3, dim=2) vk_set = torch.split(v_k, v_k.size(2) // self.num_head, dim=2) vq_set = torch.split(v_q, v_q.size(2) // self.num_head, dim=2) vv_set = torch.split(v_v, v_v.size(2) // self.num_head, dim=2) qk_set = torch.split(q_k, q_k.size(2) // self.num_head, dim=2) qq_set = torch.split(q_q, q_q.size(2) // self.num_head, dim=2) qv_set = torch.split(q_v, q_v.size(2) // self.num_head, dim=2) for i in range(self.num_head): vk_slice, vq_slice, vv_slice = vk_set[i], vq_set[i], vv_set[i] qk_slice, qq_slice, qv_slice = qk_set[i], qq_set[i], qv_set[i] q2v = (vq_slice @ qk_slice.transpose(1, 2)).masked_fill(q_mask. unsqueeze(1).expand([batch_size, num_obj, max_len]) == 0, - 1000000000.0) / (self.output_size // self.num_head) ** 0.5 v2q = (qq_slice @ vk_slice.transpose(1, 2)).masked_fill(v_mask. 
unsqueeze(1).expand([batch_size, max_len, num_obj]) == 0, - 1000000000.0) / (self.output_size // self.num_head) ** 0.5 interMAF_q2v = F.softmax(q2v, dim=2) interMAF_v2q = F.softmax(v2q, dim=2) v_update = interMAF_q2v @ qv_slice if i == 0 else torch.cat(( v_update, interMAF_q2v @ qv_slice), dim=2) q_update = interMAF_v2q @ vv_slice if i == 0 else torch.cat(( q_update, interMAF_v2q @ vv_slice), dim=2) cat_v = torch.cat((v, v_update), dim=2) cat_q = torch.cat((q, q_update), dim=2) updated_v = self.v_output(self.drop(cat_v)) updated_q = self.q_output(self.drop(cat_q)) return updated_v, updated_q class SingleBlockNew(nn.Module): """ Single Block Inter-/Intra-modality stack multiple times """ def __init__(self, num_block, v_size, q_size, output_size, num_inter_head, num_intra_head, drop=0.0): super(SingleBlockNew, self).__init__() self.v_size = v_size self.q_size = q_size self.output_size = output_size self.num_inter_head = num_inter_head self.num_intra_head = num_intra_head self.num_block = num_block self.v_lin = nn.Linear(v_size, output_size) self.q_lin = nn.Linear(q_size, output_size) self.interBlock = InterModalityUpdate(output_size, output_size, output_size, num_inter_head, drop) self.intraBlock = DyIntraModalityUpdate(output_size, output_size, output_size, num_intra_head, drop) self.drop = nn.Dropout(drop) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.v_lin.weight primals_3 = self.v_lin.bias primals_5 = self.q_lin.weight primals_6 = self.q_lin.bias primals_9 = self.interBlock.v_lin.weight primals_10 = self.interBlock.v_lin.bias primals_11 = self.interBlock.q_lin.weight primals_12 = self.interBlock.q_lin.bias primals_13 = self.interBlock.v_output.weight primals_14 = self.interBlock.v_output.bias primals_15 = self.interBlock.q_output.weight primals_16 = self.interBlock.q_output.bias primals_7 = self.intraBlock.v4q_gate_lin.weight primals_18 = self.intraBlock.v4q_gate_lin.bias primals_8 = self.intraBlock.q4v_gate_lin.weight primals_20 = self.intraBlock.q4v_gate_lin.bias primals_21 = self.intraBlock.v_lin.weight primals_22 = self.intraBlock.v_lin.bias primals_23 = self.intraBlock.q_lin.weight primals_24 = self.intraBlock.q_lin.bias primals_17 = self.intraBlock.v_output.weight primals_26 = self.intraBlock.v_output.bias primals_19 = self.intraBlock.q_output.weight primals_28 = self.intraBlock.q_output.bias primals_1 = input_0 primals_4 = input_1 primals_25 = input_2 primals_27 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28]) return output[0], output[1]
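A minimal smoke test for the eager modules above (a sketch, not part of the original repo): CPU suffices here, since only the compiled SingleBlockNew.forward routes through the CUDA call(). The shapes mirror the traced sizes (batch 4, 4 objects/tokens, feature size 4, one head), and the all-ones masks are an assumption.

# Hypothetical smoke test; assumes the class definitions above are in scope.
import torch

inter = InterModalityUpdate(v_size=4, q_size=4, output_size=4, num_head=1)
intra = DyIntraModalityUpdate(v_size=4, q_size=4, output_size=4, num_head=1)
v = torch.rand(4, 4, 4)    # [batch, num_obj, feat_size]
q = torch.rand(4, 4, 4)    # [batch, max_len, feat_size]
v_mask = torch.ones(4, 4)  # [batch, num_obj], 1 = valid
q_mask = torch.ones(4, 4)  # [batch, max_len], 1 = valid
v, q = inter(v, q, v_mask, q_mask)  # inter-modality attention flow
v, q = intra(v, q, v_mask, q_mask)  # dynamic intra-modality attention flow
print(v.shape, q.shape)    # torch.Size([4, 4, 4]) twice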
TranTony/DFAF-for-VQA.pytorch
SingleBlock
false
12053
[ "MIT" ]
0
eba1a893e8e5d3d8bf85078611b0bcf4d56eea86
https://github.com/TranTony/DFAF-for-VQA.pytorch/tree/eba1a893e8e5d3d8bf85078611b0bcf4d56eea86
PatchEmbed3D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/xv/cxvyxp6qh5llintn5jz7ixmjsap4tzaig7itbosate6caxtzghom.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [2, 4, 4], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3145728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 8192) % 96 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = 
tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (96, 3, 2, 4, 4), (96, 32, 16, 4, 1)) assert_size_stride(primals_3, (96, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 4, 4), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 96, 32, 16, 16), (786432, 8192, 256, 16, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 3145728, grid=grid(3145728), stream=stream0) del primals_3 return (buf1, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((96, 3, 2, 4, 4), (96, 32, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
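Restated in eager PyTorch for clarity (a sketch, not the generated code): the extern convolution above runs with bias=None, and triton_poi_fused_convolution_0 then adds the per-channel bias in place. The index arithmetic x1 = xindex // 8192 % 96 recovers the channel, because each channel holds 32*16*16 = 8192 output elements.

# Eager-mode restatement of the fused bias-add kernel (a sketch).
import torch

buf = torch.randn(4, 96, 32, 16, 16)  # conv output produced with bias=None
bias = torch.randn(96)                # corresponds to primals_3
buf += bias.view(1, 96, 1, 1, 1)      # same effect as the Triton kernel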
import torch import torch.nn.functional as F import torch.nn as nn class PatchEmbed3D(nn.Module): """ Video to Patch Embedding. Args: patch_size (int): Patch token size. Default: (2,4,4). in_chans (int): Number of input video channels. Default: 3. embed_dim (int): Number of linear projection output channels. Default: 96. norm_layer (nn.Module, optional): Normalization layer. Default: None """ def __init__(self, patch_size=(2, 4, 4), in_chans=3, embed_dim=96, norm_layer=None): super().__init__() self.patch_size = patch_size self.in_chans = in_chans self.embed_dim = embed_dim self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) if norm_layer is not None: self.norm = norm_layer(embed_dim) else: self.norm = None def forward(self, x): """Forward function.""" _, _, D, H, W = x.size() if W % self.patch_size[2] != 0: x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2])) if H % self.patch_size[1] != 0: x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1]) ) if D % self.patch_size[0] != 0: x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self. patch_size[0])) x = self.proj(x) if self.norm is not None: D, Wh, Ww = x.size(2), x.size(3), x.size(4) x = x.flatten(2).transpose(1, 2) x = self.norm(x) x = x.transpose(1, 2).view(-1, self.embed_dim, D, Wh, Ww) return x def get_inputs(): return [torch.rand([4, 3, 64, 64, 64])] def get_init_inputs(): return [[], {}]
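A quick eager-mode check of the padding logic in PatchEmbed3D.forward (a sketch; the 5x10x10 input is a made-up example): dimensions that are not multiples of the patch size are right-padded before the strided Conv3d, so the output grid is the ceiling division of each spatial dim.

# Assumes the PatchEmbed3D class above is in scope; runs on CPU.
import torch

pe = PatchEmbed3D(patch_size=(2, 4, 4), in_chans=3, embed_dim=96)
x = torch.rand(1, 3, 5, 10, 10)  # 5 % 2 != 0 and 10 % 4 != 0, so F.pad fires
out = pe(x)                      # padded to (6, 12, 12) before the conv
print(out.shape)                 # torch.Size([1, 96, 3, 3, 3])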
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 8192 % 96 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (96, 3, 2, 4, 4), (96, 32, 16, 4, 1)) assert_size_stride(primals_3, (96,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 4, 4), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 96, 32, 16, 16), (786432, 8192, 256, 16, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(3145728)](buf1, primals_3, 3145728, XBLOCK=512, num_warps=8, num_stages=1) del primals_3 return buf1, primals_1, primals_2 class PatchEmbed3DNew(nn.Module): """ Video to Patch Embedding. Args: patch_size (int): Patch token size. Default: (2,4,4). in_chans (int): Number of input video channels. Default: 3. embed_dim (int): Number of linear projection output channels. Default: 96. norm_layer (nn.Module, optional): Normalization layer. Default: None """ def __init__(self, patch_size=(2, 4, 4), in_chans=3, embed_dim=96, norm_layer=None): super().__init__() self.patch_size = patch_size self.in_chans = in_chans self.embed_dim = embed_dim self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) if norm_layer is not None: self.norm = norm_layer(embed_dim) else: self.norm = None def forward(self, input_0): primals_2 = self.proj.weight primals_3 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
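And a sketch of the compiled path (assumptions: a CUDA device is available, and the input has the exact shape that call() above asserts via assert_size_stride):

# Hypothetical usage of the compiled module; not from the original repo.
import torch

pe = PatchEmbed3DNew().cuda()
x = torch.rand(4, 3, 64, 64, 64, device='cuda')  # only shape call() accepts
out = pe(x)
print(out.shape)  # torch.Size([4, 96, 32, 16, 16]), matching the size assert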
acewjh/Video-Swin-Transformer
PatchEmbed3D
false
12054
[ "Apache-2.0" ]
0
bfbc8dde12e991455b34b921ca45a978b4dbfdbc
https://github.com/acewjh/Video-Swin-Transformer/tree/bfbc8dde12e991455b34b921ca45a978b4dbfdbc
Vgg16
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/f7/cf7tayhctr3m6ezk7xezotpdlc5h4drokdkbz4vy2pfkbdxnmn4q.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xq/cxq75w43anllid5ys7ss3yyizuoeph3vvaqlvm5lo434hrywtyle.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/32/c32xiwptfqtyhbnde262mvq5tzywzo6zquurttkv7sztqnze6yni.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jj/cjjz4tpbucpuc3faa2ky32crfwhb5fbnssd6o2yfkgdcjg2acfmo.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/tg/ctgdsxjd3rciejxtjvi3y2w5fmmggh5lm3mivuygvkdzeb3zulmc.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/e7/ce7jqsdrj5poslb2hpufqd2wdux5xiab5n2auqal3ztzvkzrmnzl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, 
math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ks/ckso6iiq5yfqfxmx7ilr6ufrmz6mlkiy75pexzhyf3ierq4pu3zl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 262144 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rv/crv3uzu52jbc4u62gio2klk6cj5xhjt7yazr75tq67kvtteddsn5.py # Topologically Sorted Source Nodes: [conv2d, h], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # h => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ku/ckuscrbyawdttbdara4zmhmq3lgm6lvxmizlt7j4v446lfogr7ah.py # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_2 => getitem, getitem_1 # Graph 
fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) % 32 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ws/cwssgzseoqxwmttgkoxdmvdzcrtg4ars5flpnsa2at2qixzwygfj.py # Topologically Sorted Source Nodes: [conv2d_2, h_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # h_3 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) 
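# [annotation, not generated output] All of the fused conv+relu epilogues in
# this file follow one pattern: an extern convolution writes its raw output
# into a channels-last buffer, then a pointwise kernel like the one below adds
# the per-channel bias and applies relu in place. Because channel is the
# stride-1 dimension here, the bias index is simply xindex % C. A rough eager
# equivalent: out = torch.relu_(conv_out.add_(bias.view(1, -1, 1, 1)))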
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/a5/ca5aycvivtwycqu7yn2xzgnljbqetxezkymwgte32n4b4c3doezm.py # Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_5 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) % 16 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (256*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + (256*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/c3/cc36sjgk3au3ve2witr7srumjy6npsyym5bconvmq65prldokmso.py # Topologically Sorted Source Nodes: [conv2d_4, h_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # h_6 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_13 = async_compile.triton('triton_poi_fused_convolution_relu_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/l4/cl4n5dxp5ry2ji6m3g5uyniuwrai22ts6qhsulpbeng2mhu4ibj7.py # Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_9 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_14 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = (xindex // 256) % 8 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + (512*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, 
tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bo/cbory36nvcjc37vmkyigprzjn5qrg2tdk4ivdkunxl3icdtgur5z.py # Topologically Sorted Source Nodes: [conv2d_7, h_10], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_7 => convolution_7 # h_10 => relu_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) triton_poi_fused_convolution_relu_15 = async_compile.triton('triton_poi_fused_convolution_relu_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7j/c7jd3cm6j3ovynecovhl5prgqhcik5umc42dyk3cqoyl3ul6ahpm.py # Topologically Sorted Source Nodes: [conv2d_12, h_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_12 => convolution_12 # h_15 => relu_12 # Graph fragment: # %convolution_12 : [num_users=1] = 
call_function[target=torch.ops.aten.convolution.default](args = (%relu_11, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_12 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_12,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_12, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_16 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 64], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = (yindex // 512) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (32768*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (64*y3)), tmp4, xmask) tl.store(out_ptr1 + (y0 + (512*x2) + (32768*y1)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27 = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), 
(576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512, ), (1, )) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512, ), (1, )) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512, ), (1, )) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512, ), (1, )) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 192, 9, grid=grid(192, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_10, buf5, 32768, 9, grid=grid(32768, 9), stream=stream0) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_12, buf6, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_14, buf7, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_7.run(primals_16, buf8, 131072, 9, grid=grid(131072, 9), stream=stream0) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] 
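# [annotation, not generated output] primals_18/20/22/24/26 (conv4_2, conv4_3
# and conv5_1..3) are all (512, 512, 3, 3), so the five repacks below reuse
# the single compiled kernel triton_poi_fused_8 with the same
# ynumel = 512 * 512 = 262144 and xnumel = 3 * 3 = 9.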
triton_poi_fused_8.run(primals_18, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_20, buf10, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_22, buf11, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_24, buf12, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_26, buf13, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_26 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv2d, h], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf15, primals_2, 1048576, grid=grid(1048576), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [conv2d_1, h_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf17, primals_5, 1048576, grid=grid(1048576), stream=stream0) del primals_5 buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_10.run(buf17, buf18, buf19, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [conv2d_2, h_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_11.run(buf21, primals_7, 524288, grid=grid(524288), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [conv2d_3, h_4], Original ATen: [aten.convolution, aten.relu] 
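# [annotation, not generated output] The recurring "bufN = bufM; del bufM
# # reuse" lines mark in-place fusion: the relu kernel receives the conv
# output as its mutated in_out_ptr0 argument and overwrites it, so no second
# activation buffer is allocated for the bias+relu step.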
triton_poi_fused_convolution_relu_11.run(buf23, primals_9, 524288, grid=grid(524288), stream=stream0) del primals_9 buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) # Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_12.run(buf23, buf24, buf25, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [conv2d_4, h_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_13.run(buf27, primals_11, 262144, grid=grid(262144), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf29 = buf28; del buf28 # reuse # Topologically Sorted Source Nodes: [conv2d_5, h_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_13.run(buf29, primals_13, 262144, grid=grid(262144), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [conv2d_6, h_8], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_13.run(buf31, primals_15, 262144, grid=grid(262144), stream=stream0) del primals_15 buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) # Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_14.run(buf31, buf32, buf33, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf35 = buf34; del buf34 # reuse # Topologically Sorted Source Nodes: [conv2d_7, h_10], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_15.run(buf35, primals_17, 131072, grid=grid(131072), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf37 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [conv2d_8, h_11], Original ATen: [aten.convolution, aten.relu] 
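# [annotation, not generated output] The xnumel passed to these launches is
# the flat element count of the activation, e.g. 131072 = 4 * 512 * 8 * 8
# below; grid(xnumel) resolves to a 1D launch of ceil(xnumel / XBLOCK)
# programs.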
triton_poi_fused_convolution_relu_15.run(buf37, primals_19, 131072, grid=grid(131072), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf39 = buf38; del buf38 # reuse # Topologically Sorted Source Nodes: [conv2d_9, h_12], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_15.run(buf39, primals_21, 131072, grid=grid(131072), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf40 = extern_kernels.convolution(buf39, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf41 = buf40; del buf40 # reuse # Topologically Sorted Source Nodes: [conv2d_10, h_13], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_15.run(buf41, primals_23, 131072, grid=grid(131072), stream=stream0) del primals_23 # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf41, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf43 = buf42; del buf42 # reuse # Topologically Sorted Source Nodes: [conv2d_11, h_14], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_15.run(buf43, primals_25, 131072, grid=grid(131072), stream=stream0) del primals_25 # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf44 = extern_kernels.convolution(buf43, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf45 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32) buf46 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) # Topologically Sorted Source Nodes: [conv2d_12, h_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_16.run(buf44, primals_27, buf45, buf46, 2048, 64, grid=grid(2048, 64), stream=stream0) del buf44 del primals_27 return (buf45, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf41, buf43, buf46, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
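# [annotation, not generated output] A minimal eager-PyTorch sketch of what
# triton_poi_fused_0 through triton_poi_fused_8 compute: they only repack the
# contiguous NCHW weights (and, in triton_poi_fused_1, the input) into
# channels-last strides before the extern convolutions run. Shapes and strides
# are taken from the buffers above; the helper name is ours.
import torch

def _repack_channels_last(t: torch.Tensor) -> torch.Tensor:
    # Same data and shape; strides permuted so channel becomes the stride-1 dim.
    return t.contiguous(memory_format=torch.channels_last)

_w = torch.randn(64, 3, 3, 3)   # primals_1's shape
assert _repack_channels_last(_w).stride() == (27, 1, 9, 3)      # matches buf0
_x = torch.randn(4, 3, 64, 64)  # primals_3's shape
assert _repack_channels_last(_x).stride() == (12288, 1, 192, 3)  # matches buf1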
import torch from torch import nn import torch.nn.functional as F class Vgg16(nn.Module): def __init__(self): super(Vgg16, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) def forward(self, X): h = F.relu(self.conv1_1(X), inplace=True) h = F.relu(self.conv1_2(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv2_1(h), inplace=True) h = F.relu(self.conv2_2(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv3_1(h), inplace=True) h = F.relu(self.conv3_2(h), inplace=True) h = F.relu(self.conv3_3(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv4_1(h), inplace=True) h = F.relu(self.conv4_2(h), inplace=True) h = F.relu(self.conv4_3(h), inplace=True) h = F.relu(self.conv5_1(h), inplace=True) h = F.relu(self.conv5_2(h), inplace=True) h = F.relu(self.conv5_3(h), inplace=True) relu5_3 = h return relu5_3 def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
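# [annotation, not generated output] A minimal shape check for the eager
# module above, assuming CPU execution; it mirrors buf45, the tensor the
# compiled call() returns first. Three 2x2 max-pools take 64x64 down to 8x8
# while the channel count grows from 3 to 512.
if __name__ == "__main__":
    model = Vgg16().eval()
    x, = get_inputs()
    with torch.no_grad():
        relu5_3 = model(x)
    assert relu5_3.shape == (4, 512, 8, 8)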
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) 
tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 8 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 512 * x2 + 32768 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 
3), (576, 1, 192, 64), torch. float32) triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_9[grid(1048576)](buf15, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_9[grid(1048576)](buf17, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_10[grid(262144)](buf17, buf18, buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf18, buf3, 
stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_11[grid(524288)](buf21, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_11[grid(524288)](buf23, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_12[grid(131072)](buf23, buf24, buf25, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_13[grid(262144)](buf27, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_13[grid(262144)](buf29, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_13[grid(262144)](buf31, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_14[grid(65536)](buf31, buf32, buf33, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf35 = buf34 del buf34 triton_poi_fused_convolution_relu_15[grid(131072)](buf35, primals_17, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf37 = buf36 del buf36 triton_poi_fused_convolution_relu_15[grid(131072)](buf37, primals_19, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_15[grid(131072)](buf39, primals_21, 131072, XBLOCK=1024, num_warps=4, 
num_stages=1) del primals_21 buf40 = extern_kernels.convolution(buf39, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf41 = buf40 del buf40 triton_poi_fused_convolution_relu_15[grid(131072)](buf41, primals_23, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_23 buf42 = extern_kernels.convolution(buf41, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf43 = buf42 del buf42 triton_poi_fused_convolution_relu_15[grid(131072)](buf43, primals_25, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf44 = extern_kernels.convolution(buf43, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf45 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) buf46 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_16[grid(2048, 64) ](buf44, primals_27, buf45, buf46, 2048, 64, XBLOCK=32, YBLOCK= 32, num_warps=4, num_stages=1) del buf44 del primals_27 return (buf45, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf41, buf43, buf46) class Vgg16New(nn.Module): def __init__(self): super(Vgg16New, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv2_1.weight primals_7 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_10 = self.conv3_1.weight primals_11 = self.conv3_1.bias primals_12 = self.conv3_2.weight primals_13 = self.conv3_2.bias primals_14 = self.conv3_3.weight primals_15 = self.conv3_3.bias primals_16 = self.conv4_1.weight primals_17 = self.conv4_1.bias primals_18 = self.conv4_2.weight primals_19 = self.conv4_2.bias primals_20 = self.conv4_3.weight primals_21 = self.conv4_3.bias primals_22 = self.conv5_1.weight primals_23 = self.conv5_1.bias primals_24 = self.conv5_2.weight primals_25 = self.conv5_2.bias primals_26 = self.conv5_3.weight primals_27 = self.conv5_3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return output[0]
YueZHOU0926/MUNIT_3D
Vgg16
false
12055
[ "MIT" ]
0
5cb22b5f3cb127d5b2c4eea038254a7881bab372
https://github.com/YueZHOU0926/MUNIT_3D/tree/5cb22b5f3cb127d5b2c4eea038254a7881bab372
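Editor's sketch (not part of the record above): a minimal way to check that the Inductor-generated Vgg16New wrapper matches the eager Vgg16 module. It assumes both classes from this record are importable and a CUDA device is present, since the generated call() path is CUDA-only.

import torch

# Hypothetical equivalence check between the eager module and the compiled
# wrapper defined in the record above. Weights are copied so the two paths
# should agree numerically; the output is the relu5_3 feature map.
eager = Vgg16().cuda().eval()
compiled = Vgg16New().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical parameter names

x = torch.rand(4, 3, 64, 64, device='cuda')  # matches get_inputs()
with torch.no_grad():
    ref = eager(x)
    out = compiled(x)
print(out.shape)                           # torch.Size([4, 512, 8, 8])
print(torch.allclose(ref, out, atol=1e-5))  # expected to print True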
SequenceBias
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/vg/cvgvkbs7he2kxlg5pfohohojmk4myarfheu6y73rbt6z3xdls2y7.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %repeat],), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x3 + (16*x2)), tmp4 & xmask, 
other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x4), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 80, grid=grid(80), stream=stream0) del primals_1 del primals_2 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel from torch.nn.parameter import Parameter class SequenceBias(nn.Module): """ Adds one bias element to the end of the sequence. If the input has a shape ``(L, N, E)``, where ``L`` is the sequence length, ``N`` is the batch size, and ``E`` is the embedding dimension, the output will have a shape ``(L+1, N, E)``. Attributes: bias (:class:`torch.nn.parameter.Parameter`): the learnable bias of the module of shape ``(E)``, where ``E`` is the embedding dimension. Example: >>> m = SequenceBias(16) >>> input = torch.randn(20, 4, 16) >>> output = m(input) >>> print(output.size()) torch.Size([21, 4, 16]) """ def __init__(self, embed_dim: 'int'): """ Args: embed_dim: Embedding dimension """ super(SequenceBias, self).__init__() self.bias = Parameter(torch.empty(embed_dim)) self._reset_parameters() def _reset_parameters(self): """ Assigns normally distributed random values to the bias. """ nn.init.normal_(self.bias) def forward(self, x): _, bsz, _ = x.shape return torch.cat([x, self.bias.repeat(1, bsz, 1)]) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embed_dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x4, tmp10, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(80)](primals_1, primals_2, buf0, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class SequenceBiasNew(nn.Module): """ Adds one bias element to the end of the sequence. If the input has a shape ``(L, N, E)``, where ``L`` is the sequence length, ``N`` is the batch size, and ``E`` is the embedding dimension, the output will have a shape ``(L+1, N, E)``. Attributes: bias (:class:`torch.nn.parameter.Parameter`): the learnable bias of the module of shape ``(E)``, where ``E`` is the embedding dimension. Example: >>> m = SequenceBias(16) >>> input = torch.randn(20, 4, 16) >>> output = m(input) >>> print(output.size()) torch.Size([21, 4, 16]) """ def __init__(self, embed_dim: 'int'): """ Args: embed_dim: Embedding dimension """ super(SequenceBiasNew, self).__init__() self.bias = Parameter(torch.empty(embed_dim)) self._reset_parameters() def _reset_parameters(self): """ Assigns normally distributed random values to the bias. """ nn.init.normal_(self.bias) def forward(self, input_0): primals_2 = self.bias primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
adriansarstedt/opacus
SequenceBias
false
12056
[ "Apache-2.0" ]
0
a6c89e3d6a3a4e3e4b82bc8c68d53265a9a7cba1
https://github.com/adriansarstedt/opacus/tree/a6c89e3d6a3a4e3e4b82bc8c68d53265a9a7cba1
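Editor's sketch (not part of the record): the module appends one learned step along the sequence dimension, so the last time step of the output is the bias broadcast over the batch. This assumes SequenceBias from the record above is in scope.

import torch

# Usage matching the docstring example, plus a check of the cat semantics.
m = SequenceBias(embed_dim=16)
x = torch.randn(20, 4, 16)   # (L, N, E)
y = m(x)
print(y.shape)               # torch.Size([21, 4, 16])
# torch.cat([x, bias.repeat(1, bsz, 1)]) leaves the first L steps untouched
# and appends the bias, broadcast across the batch, as step L+1.
assert torch.equal(y[:20], x)
assert torch.equal(y[20], m.bias.expand(4, 16))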
SimpleCNN32Filter
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/bm/cbmnhl52e2aeqk7zqox2hw6totrikw5jwaijysslw6wab2d65chr.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 144) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, None) tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (32, 3, 10, 10), (300, 100, 10, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (4, 4608), (4608, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 12, 12), (4608, 144, 12, 1)) buf1 = buf0; del buf0 # reuse buf3 = empty_strided_cuda((4, 32, 12, 12), (4608, 144, 12, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf3, 18432, grid=grid(18432), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (4, 4608), (4608, 1), 0), reinterpret_tensor(primals_4, (4608, 4), (1, 4608), 0), alpha=1, beta=1, out=buf2) del primals_5 return (buf2, primals_1, primals_3, reinterpret_tensor(buf1, (4, 4608), (4608, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 3, 10, 10), (300, 100, 10, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4608), (4608, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class SimpleCNN32Filter(nn.Module): """ Defines a simple CNN architecture with 1 convolutional layer """ def __init__(self, num_classes): super().__init__() self.conv1 = nn.Conv2d(3, 32, kernel_size=10, stride=2) self.fc1 = nn.Linear(144 * 32, num_classes) def forward(self, x): x = F.relu(self.conv1(x)) x = x.view(-1, 144 * 32) x = self.fc1(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 144 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (32, 3, 10, 10), (300, 100, 10, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (4, 4608), (4608, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 12, 12), (4608, 144, 12, 1)) buf1 = buf0 del buf0 buf3 = empty_strided_cuda((4, 32, 12, 12), (4608, 144, 12, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(18432)]( buf1, primals_2, buf3, 18432, XBLOCK=128, num_warps=4, num_stages=1 ) del primals_2 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (4, 4608), (4608, 1), 0), reinterpret_tensor(primals_4, (4608, 4), (1, 4608), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, primals_1, primals_3, reinterpret_tensor(buf1, (4, 4608), (4608, 1), 0), primals_4, buf3 class SimpleCNN32FilterNew(nn.Module): """ Defines a simple CNN architecture with 1 convolutional layer """ def __init__(self, num_classes): super().__init__() self.conv1 = nn.Conv2d(3, 32, kernel_size=10, stride=2) self.fc1 = nn.Linear(144 * 32, num_classes) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
adwaykanhere/df-dn-paper
SimpleCNN32Filter
false
12057
[ "MIT" ]
0
5df413e06ce33c6be5d005e6d1141de9fcd45cb4
https://github.com/adwaykanhere/df-dn-paper/tree/5df413e06ce33c6be5d005e6d1141de9fcd45cb4
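Editor's sketch (not part of the record): the 144 * 32 = 4608 flatten size follows from the convolution arithmetic. Assuming the SimpleCNN32Filter class above is importable, a quick shape check:

import torch

# Conv2d(3, 32, kernel_size=10, stride=2, padding=0) on a 32x32 input gives
# floor((32 - 10) / 2) + 1 = 12 per spatial dim, so the feature map is
# (N, 32, 12, 12) and flattens to 32 * 12 * 12 = 4608 inputs for fc1.
net = SimpleCNN32Filter(num_classes=4)
x = torch.rand(4, 3, 32, 32)  # matches get_inputs()
logits = net(x)
print(logits.shape)           # torch.Size([4, 4])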
BinaryFocalLossWithLogits
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3n/c3nw3cz2enukxbg3hle47cxkrfim5hl5dlq73t6qiiiljiwvjypn.py # Topologically Sorted Source Nodes: [probs, sub, pow_1, mul, mul_1, add, log, mul_2, pow_2, mul_3, sub_1, mul_4, sub_2, add_1, log_1, mul_5, loss_tmp, loss_tmp_1], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.add, aten.log, aten.sub, aten.squeeze] # Source node to ATen node mapping: # add => add # add_1 => add_1 # log => log # log_1 => log_1 # loss_tmp => sub_3 # loss_tmp_1 => squeeze # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # pow_1 => pow_1 # pow_2 => pow_2 # probs => sigmoid # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %sigmoid : [num_users=4] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -4), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, 1e-08), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 2.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, -3), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %unsqueeze), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 
1e-08), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %log_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_5), kwargs = {}) # %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.dim](args = (%sub_3, 1), kwargs = {}) triton_poi_fused_add_log_mul_pow_rsub_sigmoid_squeeze_sub_0 = async_compile.triton('triton_poi_fused_add_log_mul_pow_rsub_sigmoid_squeeze_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_mul_pow_rsub_sigmoid_squeeze_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_log_mul_pow_rsub_sigmoid_squeeze_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 256 x0 = xindex % 64 x2 = (xindex // 256) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = tmp3 * tmp3 tmp5 = -4.0 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp9 = 1e-08 tmp10 = tmp1 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp8 * tmp11 tmp13 = tmp1 * tmp1 tmp14 = -3.0 tmp15 = tmp13 * tmp14 tmp16 = tmp2 - tmp7 tmp17 = tmp15 * tmp16 tmp18 = tmp3 + tmp9 tmp19 = tl_math.log(tmp18) tmp20 = tmp17 * tmp19 tmp21 = tmp12 - tmp20 tl.store(out_ptr0 + (x4), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [probs, sub, pow_1, mul, mul_1, add, log, mul_2, pow_2, mul_3, sub_1, mul_4, sub_2, add_1, log_1, mul_5, loss_tmp, loss_tmp_1], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.add, 
aten.log, aten.sub, aten.squeeze] stream0 = get_raw_stream(0) triton_poi_fused_add_log_mul_pow_rsub_sigmoid_squeeze_sub_0.run(arg0_1, arg1_1, buf0, 1024, grid=grid(1024), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def binary_focal_loss_with_logits(input: 'torch.Tensor', target: 'torch.Tensor', alpha: 'float'=0.25, gamma: 'float'=2.0, reduction: 'str'='none', eps: 'float'=1e-08) ->torch.Tensor: """Function that computes Binary Focal loss. .. math:: \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t) where: - :math:`p_t` is the model's estimated probability for each class. Args: input (torch.Tensor): input data tensor with shape :math:`(N, 1, *)`. target (torch.Tensor): the target tensor with shape :math:`(N, 1, *)`. alpha (float): Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`. Default: 0.25. gamma (float): Focusing parameter :math:`\\gamma >= 0`. Default: 2.0. reduction (str, optional): Specifies the reduction to apply to the output. Default: 'none'. eps (float): for numerical stability when dividing. Default: 1e-8. Returns: torch.tensor: the computed loss. Examples: >>> num_classes = 1 >>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'} >>> logits = torch.tensor([[[[6.325]]],[[[5.26]]],[[[87.49]]]]) >>> labels = torch.tensor([[[1.]],[[1.]],[[0.]]]) >>> binary_focal_loss_with_logits(logits, labels, **kwargs) tensor(4.6052) """ if not isinstance(input, torch.Tensor): raise TypeError('Input type is not a torch.Tensor. Got {}'.format( type(input))) if not len(input.shape) >= 2: raise ValueError('Invalid input shape, we expect BxCx*. Got: {}'. format(input.shape)) if input.size(0) != target.size(0): raise ValueError( 'Expected input batch_size ({}) to match target batch_size ({}).' .format(input.size(0), target.size(0))) probs = torch.sigmoid(input) target = target.unsqueeze(dim=1) loss_tmp = -alpha * torch.pow(1.0 - probs, gamma) * target * torch.log( probs + eps) - (1 - alpha) * torch.pow(probs, gamma) * (1.0 - target ) * torch.log(1.0 - probs + eps) loss_tmp = loss_tmp.squeeze(dim=1) if reduction == 'none': loss = loss_tmp elif reduction == 'mean': loss = torch.mean(loss_tmp) elif reduction == 'sum': loss = torch.sum(loss_tmp) else: raise NotImplementedError('Invalid reduction mode: {}'.format( reduction)) return loss class BinaryFocalLossWithLogits(nn.Module): """Criterion that computes Focal loss. According to :cite:`lin2017focal`, the Focal loss is computed as follows: .. math:: \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t) where: - :math:`p_t` is the model's estimated probability for each class. Args: alpha (float): Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`. gamma (float): Focusing parameter :math:`\\gamma >= 0`. reduction (str, optional): Specifies the reduction to apply to the output: ‘none’ | ‘mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Default: ‘none’. Shape: - Input: :math:`(N, 1, *)`. - Target: :math:`(N, 1, *)`.
Examples: >>> N = 1 # num_classes >>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'} >>> loss = BinaryFocalLossWithLogits(**kwargs) >>> input = torch.randn(1, N, 3, 5, requires_grad=True) >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N) >>> output = loss(input, target) >>> output.backward() """ def __init__(self, alpha: 'float', gamma: 'float'=2.0, reduction: 'str' ='none') ->None: super(BinaryFocalLossWithLogits, self).__init__() self.alpha: 'float' = alpha self.gamma: 'float' = gamma self.reduction: 'str' = reduction self.eps: 'float' = 1e-08 def forward(self, input: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: return binary_focal_loss_with_logits(input, target, self.alpha, self.gamma, self.reduction, self.eps) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'alpha': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_log_mul_pow_rsub_sigmoid_squeeze_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 256 x0 = xindex % 64 x2 = xindex // 256 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = tmp3 * tmp3 tmp5 = -4.0 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp9 = 1e-08 tmp10 = tmp1 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp8 * tmp11 tmp13 = tmp1 * tmp1 tmp14 = -3.0 tmp15 = tmp13 * tmp14 tmp16 = tmp2 - tmp7 tmp17 = tmp15 * tmp16 tmp18 = tmp3 + tmp9 tmp19 = tl_math.log(tmp18) tmp20 = tmp17 * tmp19 tmp21 = tmp12 - tmp20 tl.store(out_ptr0 + x4, tmp21, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_log_mul_pow_rsub_sigmoid_squeeze_sub_0[grid(1024) ](arg0_1, arg1_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1 ) del arg0_1 del arg1_1 return buf0, def binary_focal_loss_with_logits(input: 'torch.Tensor', target: 'torch.Tensor', alpha: 'float'=0.25, gamma: 'float'=2.0, reduction: 'str'='none', eps: 'float'=1e-08) ->torch.Tensor: """Function that computes Binary Focal loss. .. math:: \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t) where: - :math:`p_t` is the model's estimated probability for each class. Args: input (torch.Tensor): input data tensor with shape :math:`(N, 1, *)`. target (torch.Tensor): the target tensor with shape :math:`(N, 1, *)`. alpha (float): Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`. Default: 0.25. gamma (float): Focusing parameter :math:`\\gamma >= 0`. Default: 2.0. reduction (str, optional): Specifies the reduction to apply to the output. Default: 'none'. eps (float): for numerical stability when dividing. Default: 1e-8. Returns: torch.tensor: the computed loss. Examples: >>> num_classes = 1 >>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'} >>> logits = torch.tensor([[[[6.325]]],[[[5.26]]],[[[87.49]]]]) >>> labels = torch.tensor([[[1.]],[[1.]],[[0.]]]) >>> binary_focal_loss_with_logits(logits, labels, **kwargs) tensor(4.6052) """ if not isinstance(input, torch.Tensor): raise TypeError('Input type is not a torch.Tensor. Got {}'.format( type(input))) if not len(input.shape) >= 2: raise ValueError('Invalid input shape, we expect BxCx*. Got: {}'. format(input.shape)) if input.size(0) != target.size(0): raise ValueError( 'Expected input batch_size ({}) to match target batch_size ({}).'
            .format(input.size(0), target.size(0)))
    probs = torch.sigmoid(input)
    target = target.unsqueeze(dim=1)
    loss_tmp = -alpha * torch.pow(1.0 - probs, gamma) * target * torch.log(
        probs + eps) - (1 - alpha) * torch.pow(probs, gamma) * (1.0 - target
        ) * torch.log(1.0 - probs + eps)
    loss_tmp = loss_tmp.squeeze(dim=1)
    if reduction == 'none':
        loss = loss_tmp
    elif reduction == 'mean':
        loss = torch.mean(loss_tmp)
    elif reduction == 'sum':
        loss = torch.sum(loss_tmp)
    else:
        raise NotImplementedError('Invalid reduction mode: {}'.format(
            reduction))
    return loss


class BinaryFocalLossWithLogitsNew(nn.Module):
    """Criterion that computes Focal loss.

    According to :cite:`lin2017focal`, the Focal loss is computed as follows:

    .. math::

        \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t)

    where:
       - :math:`p_t` is the model's estimated probability for each class.

    Args:
        alpha (float): Weighting factor for the rare class
          :math:`\\alpha \\in [0, 1]`.
        gamma (float): Focusing parameter :math:`\\gamma >= 0`.
        reduction (str, optional): Specifies the reduction to apply to the
          output: 'none' | 'mean' | 'sum'. 'none': no reduction will be
          applied, 'mean': the sum of the output will be divided by the
          number of elements in the output, 'sum': the output will be
          summed. Default: 'none'.

    Shape:
        - Input: :math:`(N, 1, *)`.
        - Target: :math:`(N, 1, *)`.

    Examples:
        >>> N = 1  # num_classes
        >>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'}
        >>> loss = BinaryFocalLossWithLogits(**kwargs)
        >>> input = torch.randn(1, N, 3, 5, requires_grad=True)
        >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self, alpha: 'float', gamma: 'float'=2.0, reduction:
        'str'='none') -> None:
        super(BinaryFocalLossWithLogitsNew, self).__init__()
        self.alpha: 'float' = alpha
        self.gamma: 'float' = gamma
        self.reduction: 'str' = reduction
        self.eps: 'float' = 1e-08

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
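The fused kernel above bakes the trace-time hyperparameters into its constants: -4.0 is -alpha and -3.0 is (1 - alpha) for the alpha=4 supplied by get_init_inputs(). A consistency sketch comparing it against the eager function, under the assumption that a CUDA device is available (call() hard-codes cuda:0):

import torch

# A consistency sketch, assuming a CUDA device and the definitions above.
# call() consumes and deletes its argument list, so pass clones.
if torch.cuda.is_available():
    logits = torch.rand(4, 4, 4, 4, device='cuda')
    targets = torch.rand(4, 4, 4, 4, device='cuda')
    fused, = call([logits.clone(), targets.clone()])
    eager = binary_focal_loss_with_logits(logits, targets, alpha=4.0)
    torch.testing.assert_close(fused, eager, rtol=1e-4, atol=1e-5)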
adi1999/kornia
BinaryFocalLossWithLogits
false
12,058
[ "ECL-2.0", "Apache-2.0" ]
0
bb476a36e2725d687d1879b5a0d877c1ba860c25
https://github.com/adi1999/kornia/tree/bb476a36e2725d687d1879b5a0d877c1ba860c25
NeuralNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/m7/cm7xlewriw35lcxn7nniu3r3vq2wi5cikv56roeywl7e4danxtkd.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/j4/cj4miacghwuwo6tmp3hylr7yjqyun32g4pisr65oc2dtlcxfwv2f.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uy/cuylqrd7ye33ogvvpsnxb3skali4boxth4tryw5hn4czjzyh4a34.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (8, ), (1, )) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 512, grid=grid(512), stream=stream0) del buf0 del primals_3 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original 
ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0) del buf4 return (buf5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 8), (8, 1), 0), buf5, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class NeuralNetwork(nn.Module): def __init__(self, in_dim, out_dim, hidden_dim=None): """ Simple two-layer neural network. """ super(NeuralNetwork, self).__init__() if hidden_dim is None: hidden_dim = in_dim * 2 self.l1 = nn.Linear(in_dim, hidden_dim) self.ac1 = nn.LeakyReLU() self.l2 = nn.Linear(hidden_dim, out_dim) self.ac2 = nn.Softmax(dim=0) def forward(self, x): x = self.ac1(self.l1(x.float())) return self.ac2(self.l2(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
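One detail worth noticing in the module above: the final activation is nn.Softmax(dim=0), so normalization runs over the first tensor dimension rather than the feature dimension. A quick smoke-test sketch (no GPU required; sizes follow get_inputs()/get_init_inputs()):

import torch

# Smoke test for the eager NeuralNetwork above.
net = NeuralNetwork(in_dim=4, out_dim=4)
y = net(torch.rand([4, 4, 4, 4]))
print(y.shape)       # torch.Size([4, 4, 4, 4])
print(y.sum(dim=0))  # all ones (up to fp error): softmax normalizes over dim 0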
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(512)](buf0, primals_3, buf1, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, 
reinterpret_tensor(buf2, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_2[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 return buf5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 8), (8, 1), 0), buf5, primals_4 class NeuralNetworkNew(nn.Module): def __init__(self, in_dim, out_dim, hidden_dim=None): """ Simple two-layer neural network. """ super(NeuralNetworkNew, self).__init__() if hidden_dim is None: hidden_dim = in_dim * 2 self.l1 = nn.Linear(in_dim, hidden_dim) self.ac1 = nn.LeakyReLU() self.l2 = nn.Linear(hidden_dim, out_dim) self.ac2 = nn.Softmax(dim=0) def forward(self, input_0): primals_2 = self.l1.weight primals_3 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
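The generated path can be checked against the eager module by sharing parameters. A sketch assuming a CUDA device, since the compiled call() allocates on cuda:0; both classes expose the same l1/l2 parameter names, so state_dict transfer works directly:

import torch

# Equivalence sketch: copy the eager module's weights into the compiled
# wrapper and compare outputs.
if torch.cuda.is_available():
    eager = NeuralNetwork(in_dim=4, out_dim=4).cuda()
    compiled = NeuralNetworkNew(in_dim=4, out_dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-5)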
adewynter/Lightboard
NeuralNetwork
false
12,059
[ "Apache-2.0" ]
0
f02eae64f11a989030b52314aa66709477274eb3
https://github.com/adewynter/Lightboard/tree/f02eae64f11a989030b52314aa66709477274eb3
bodypose_model
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex 
= xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py # Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_5 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + 
(x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xq/cxqz2dr7nh2qabrtemj52pazmhrknj5ltcy32ka252ia6a3jgpqi.py # Topologically Sorted Source Nodes: [input_6, input_7], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_6 => convolution_2 # input_7 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pr/cpri5daxkfbmt5ostbhb5o2avircr64a2rmdkxfackaxyjfc7owe.py # Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_10 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime 
import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/of/cof37d5wbqzvtkioj7k4me7wqpvfv55rs62ytonj7gij2o3abnod.py # Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_11 => convolution_4 # input_12 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mn/cmnzsv2cdbsuq2sygridqvwumzmcvknuthlumel5m25l2ajsr4ft.py # Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_19 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ic/cicsjqc3cfcjzqlztx4hz7ssqwe47ngo3g2onc6463k3vgfmt5cw.py # Topologically Sorted Source Nodes: [input_20, input_21], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_20 => convolution_8 # input_21 => relu_8 # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rs/crsb2j7t6kjc2dizrgavde3h3rerob3nhf7iqux6o24562lkvvoe.py # Topologically Sorted Source Nodes: [input_24, input_25], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_24 => convolution_10 # input_25 => relu_10 # Graph fragment: # %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_9, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {}) triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/qy/cqyis4pzdzl2zcpdenz7kfyw4uxhak4ugnkkhusp7xtxj4qytdez.py # Topologically Sorted Source Nodes: [input_26, input_27], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_26 => convolution_11 # input_27 => relu_11 # Graph fragment: # %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_10, %primals_24, %primals_25, 
[1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_11 : [num_users=8] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {}) triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xh/cxh5qnz7467zwx7kksukcyl5yqbimdzz2jusq6gmtz3v7ngsbddj.py # Topologically Sorted Source Nodes: [out2], Original ATen: [aten.cat] # Source node to ATen node mapping: # out2 => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_16, %convolution_21, %relu_11], 1), kwargs = {}) triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 47360 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 64) % 185 x0 = xindex % 64 x2 = (xindex // 11840) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 38, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (2432*x2)), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 57, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr2 + (x0 + (64*((-38) + x1)) + (1216*x2)), tmp13 & xmask, other=0.0) tmp15 = tl.load(in_ptr3 + ((-38) + x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 185, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tl.load(in_ptr4 + (x0 + (64*((-57) + x1)) + (8192*x2)), tmp19 & xmask, other=0.0) tmp23 = tl.where(tmp13, tmp18, tmp22) tmp24 = tl.where(tmp4, tmp9, tmp23) tl.store(out_ptr0 + (x3), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/4y/c4y6uyvhmsek266ivpsqvnnoksgofgtz3h3rggm6nksziikdh57s.py # Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution] # Source node to ATen node mapping: # input_162 => convolution_84 # Graph fragment: # %convolution_84 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_73, %primals_170, %primals_171, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 9728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 38 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ce/ccekwplfoihswfouqfjqcwmfd2cg37pkjpmovikpxyaqzec4g3iq.py # Topologically Sorted Source Nodes: [input_175, input_176], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # input_175 => convolution_91 # input_176 => relu_80 # Graph fragment: # %convolution_91 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_79, %primals_184, %primals_185, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_80 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_91,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_80, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = (xindex // 64) % 19 x2 = (xindex // 1216) x3 = xindex % 1216 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) 
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x3 + (1280*x2)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185 = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) 
async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185 = args
    args.clear()
    assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (64, ), (1, ))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64, ), (1, ))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128, ), (1, ))
    assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (128, ), (1, ))
    assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_11, (256, ), (1, ))
    assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_13, (256, ), (1, ))
    assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_15, (256, ), (1, ))
    assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_17, (256, ), (1, ))
    assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_19, (512, ), (1, ))
    assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_21, (512, ), (1, ))
    assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_23, (256, ), (1, ))
    assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_25, (128, ), (1, ))
    assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_27, (128, ), (1, ))
    assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_29, (128, ), (1, ))
    assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_31, (128, ), (1, ))
    assert_size_stride(primals_32, (512, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_33, (512, ), (1, ))
    assert_size_stride(primals_34, (38, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_35, (38, ), (1, ))
    assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_37, (128, ), (1, ))
    assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_39, (128, ), (1, ))
    assert_size_stride(primals_40, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_41, (128, ), (1, ))
    assert_size_stride(primals_42, (512, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_43, (512, ), (1, ))
    assert_size_stride(primals_44, (19, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_45, (19, ), (1, ))
    assert_size_stride(primals_46, (128, 185, 7, 7), (9065, 49, 7, 1))
    assert_size_stride(primals_47, (128, ), (1, ))
    assert_size_stride(primals_48, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_49, (128, ), (1, ))
    assert_size_stride(primals_50, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_51, (128, ), (1, ))
    assert_size_stride(primals_52, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_53, (128, ), (1, ))
    assert_size_stride(primals_54, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_55, (128, ), (1, ))
    assert_size_stride(primals_56, (128, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_57, (128, ), (1, ))
    assert_size_stride(primals_58, (38, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_59, (38, ), (1, ))
    assert_size_stride(primals_60, (128, 185, 7, 7), (9065, 49, 7, 1))
    assert_size_stride(primals_61, (128, ), (1, ))
    assert_size_stride(primals_62, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_63, (128, ), (1, ))
    assert_size_stride(primals_64, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_65, (128, ), (1, ))
    assert_size_stride(primals_66, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_67, (128, ), (1, ))
    assert_size_stride(primals_68, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_69, (128, ), (1, ))
    assert_size_stride(primals_70, (128, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_71, (128, ), (1, ))
    assert_size_stride(primals_72, (19, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_73, (19, ), (1, ))
    assert_size_stride(primals_74, (128, 185, 7, 7), (9065, 49, 7, 1))
    assert_size_stride(primals_75, (128, ), (1, ))
    assert_size_stride(primals_76, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_77, (128, ), (1, ))
    assert_size_stride(primals_78, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_79, (128, ), (1, ))
    assert_size_stride(primals_80, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_81, (128, ), (1, ))
    assert_size_stride(primals_82, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_83, (128, ), (1, ))
    assert_size_stride(primals_84, (128, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_85, (128, ), (1, ))
    assert_size_stride(primals_86, (38, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_87, (38, ), (1, ))
    assert_size_stride(primals_88, (128, 185, 7, 7), (9065, 49, 7, 1))
    assert_size_stride(primals_89, (128, ), (1, ))
    assert_size_stride(primals_90, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_91, (128, ), (1, ))
    assert_size_stride(primals_92, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_93, (128, ), (1, ))
    assert_size_stride(primals_94, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_95, (128, ), (1, ))
    assert_size_stride(primals_96, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_97, (128, ), (1, ))
    assert_size_stride(primals_98, (128, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_99, (128, ), (1, ))
    assert_size_stride(primals_100, (19, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_101, (19, ), (1, ))
    assert_size_stride(primals_102, (128, 185, 7, 7), (9065, 49, 7, 1))
    assert_size_stride(primals_103, (128, ), (1, ))
    assert_size_stride(primals_104, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_105, (128, ), (1, ))
    assert_size_stride(primals_106, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_107, (128, ), (1, ))
    assert_size_stride(primals_108, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_109, (128, ), (1, ))
    assert_size_stride(primals_110, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_111, (128, ), (1, ))
    assert_size_stride(primals_112, (128, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_113, (128, ), (1, ))
    assert_size_stride(primals_114, (38, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_115, (38, ), (1, ))
    assert_size_stride(primals_116, (128, 185, 7, 7), (9065, 49, 7, 1))
    assert_size_stride(primals_117, (128, ), (1, ))
    assert_size_stride(primals_118, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_119, (128, ), (1, ))
    assert_size_stride(primals_120, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_121, (128, ), (1, ))
    assert_size_stride(primals_122, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_123, (128, ), (1, ))
    assert_size_stride(primals_124, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_125, (128, ), (1, ))
    assert_size_stride(primals_126, (128, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_127, (128, ), (1, ))
    assert_size_stride(primals_128, (19, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_129, (19, ), (1, ))
    assert_size_stride(primals_130, (128, 185, 7, 7), (9065, 49, 7, 1))
    assert_size_stride(primals_131, (128, ), (1, ))
    assert_size_stride(primals_132, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_133, (128, ), (1, ))
    assert_size_stride(primals_134, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_135, (128, ), (1, ))
    assert_size_stride(primals_136, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_137, (128, ), (1, ))
    assert_size_stride(primals_138, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_139, (128, ), (1, ))
    assert_size_stride(primals_140, (128, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_141, (128, ), (1, ))
    assert_size_stride(primals_142, (38, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_143, (38, ), (1, ))
    assert_size_stride(primals_144, (128, 185, 7, 7), (9065, 49, 7, 1))
    assert_size_stride(primals_145, (128, ), (1, ))
    assert_size_stride(primals_146, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_147, (128, ), (1, ))
    assert_size_stride(primals_148, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_149, (128, ), (1, ))
    assert_size_stride(primals_150, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_151, (128, ), (1, ))
    assert_size_stride(primals_152, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_153, (128, ), (1, ))
    assert_size_stride(primals_154, (128, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_155, (128, ), (1, ))
    assert_size_stride(primals_156, (19, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_157, (19, ), (1, ))
    assert_size_stride(primals_158, (128, 185, 7, 7), (9065, 49, 7, 1))
    assert_size_stride(primals_159, (128, ), (1, ))
    assert_size_stride(primals_160, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_161, (128, ), (1, ))
    assert_size_stride(primals_162, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_163, (128, ), (1, ))
    assert_size_stride(primals_164, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_165, (128, ), (1, ))
    assert_size_stride(primals_166, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_167, (128, ), (1, ))
    assert_size_stride(primals_168, (128, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_169, (128, ), (1, ))
    assert_size_stride(primals_170, (38, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_171, (38, ), (1, ))
    assert_size_stride(primals_172, (128, 185, 7, 7), (9065, 49, 7, 1))
    assert_size_stride(primals_173, (128, ), (1, ))
    assert_size_stride(primals_174, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_175, (128, ), (1, ))
    assert_size_stride(primals_176, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_177, (128, ), (1, ))
    assert_size_stride(primals_178, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_179, (128, ), (1, ))
    assert_size_stride(primals_180, (128, 128, 7, 7), (6272, 49, 7, 1))
    assert_size_stride(primals_181, (128, ), (1, ))
    assert_size_stride(primals_182, (128, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_183, (128, ), (1, ))
    assert_size_stride(primals_184, (19, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_185, (19, ), (1, ))
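    # Editor's note (descriptive only): assert_size_stride guards the calling
    # convention of this compiled graph. Each primal must arrive with exactly
    # the shape and strides recorded at trace time -- roughly
    # `tuple(t.shape) == size and t.stride() == stride` -- because the pointer
    # arithmetic baked into the kernels above assumes those layouts. The
    # convolutions below dispatch to extern_kernels.convolution (ATen/cuDNN),
    # while the pointwise bias/ReLU work runs in the generated Triton kernels.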
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf1 = buf0; del buf0 # reuse
        # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution]
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf3 = buf2; del buf2 # reuse
        # Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
        # Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 262144, grid=grid(262144), stream=stream0)
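        # Editor's note (descriptive only): buf4/buf5 correspond to an eager
        # F.max_pool2d(buf3, kernel_size=2, stride=2, return_indices=True):
        # buf4 holds the pooled values at half resolution, and buf5 appears to
        # be an int8 encoding of which window position won (eager mode would
        # return global int64 indices instead), kept so the backward pass can
        # route gradients.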
        # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.convolution]
        buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf7 = buf6; del buf6 # reuse
        # Topologically Sorted Source Nodes: [input_6, input_7], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 524288, grid=grid(524288), stream=stream0)
        del primals_7
        # Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.convolution]
        buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf9 = buf8; del buf8 # reuse
        # Topologically Sorted Source Nodes: [input_8, input_9], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_2.run(buf9, primals_9, 524288, grid=grid(524288), stream=stream0)
        del primals_9
        buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8)
        # Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 131072, grid=grid(131072), stream=stream0)
        # Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.convolution]
        buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf13 = buf12; del buf12 # reuse
        # Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_4.run(buf13, primals_11, 262144, grid=grid(262144), stream=stream0)
        del primals_11
        # Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.convolution]
        buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf15 = buf14; del buf14 # reuse
        # Topologically Sorted Source Nodes: [input_13, input_14], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_4.run(buf15, primals_13, 262144, grid=grid(262144), stream=stream0)
        del primals_13
        # Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.convolution]
        buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf17 = buf16; del buf16 # reuse
        # Topologically Sorted Source Nodes: [input_15, input_16], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_4.run(buf17, primals_15, 262144, grid=grid(262144), stream=stream0)
        del primals_15
        # Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.convolution]
        buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf19 = buf18; del buf18 # reuse
        # Topologically Sorted Source Nodes: [input_17, input_18], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_4.run(buf19, primals_17, 262144, grid=grid(262144), stream=stream0)
        del primals_17
        buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
        buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.int8)
        # Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_5.run(buf19, buf20, buf21, 65536, grid=grid(65536), stream=stream0)
        # Topologically Sorted Source Nodes: [input_20], Original ATen: [aten.convolution]
        buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1))
        buf23 = buf22; del buf22 # reuse
        # Topologically Sorted Source Nodes: [input_20, input_21], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_6.run(buf23, primals_19, 131072, grid=grid(131072), stream=stream0)
        del primals_19
        # Topologically Sorted Source Nodes: [input_22], Original ATen: [aten.convolution]
        buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1))
        buf25 = buf24; del buf24 # reuse
        # Topologically Sorted Source Nodes: [input_22, input_23], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_6.run(buf25, primals_21, 131072, grid=grid(131072), stream=stream0)
        del primals_21
        # Topologically Sorted Source Nodes: [input_24], Original ATen: [aten.convolution]
        buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
        buf27 = buf26; del buf26 # reuse
        # Topologically Sorted Source Nodes: [input_24, input_25], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_7.run(buf27, primals_23, 65536, grid=grid(65536), stream=stream0)
        del primals_23
        # Topologically Sorted Source Nodes: [input_26], Original ATen: [aten.convolution]
        buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf29 = buf28; del buf28 # reuse
        # Topologically Sorted Source Nodes: [input_26, input_27], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf29, primals_25, 32768, grid=grid(32768), stream=stream0)
        del primals_25
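        # Editor's note (descriptive only): buf29 is the shared 128-channel
        # feature map, and it is deliberately kept alive for the rest of
        # call(): every triton_poi_fused_cat_9 invocation below re-reads it
        # when assembling the 185-channel input of the next stage.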
        # Topologically Sorted Source Nodes: [input_28], Original ATen: [aten.convolution]
        buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf31 = buf30; del buf30 # reuse
        # Topologically Sorted Source Nodes: [input_28, input_29], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf31, primals_27, 32768, grid=grid(32768), stream=stream0)
        del primals_27
        # Topologically Sorted Source Nodes: [input_30], Original ATen: [aten.convolution]
        buf32 = extern_kernels.convolution(buf31, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf33 = buf32; del buf32 # reuse
        # Topologically Sorted Source Nodes: [input_30, input_31], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf33, primals_29, 32768, grid=grid(32768), stream=stream0)
        del primals_29
        # Topologically Sorted Source Nodes: [input_32], Original ATen: [aten.convolution]
        buf34 = extern_kernels.convolution(buf33, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf34, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf35 = buf34; del buf34 # reuse
        # Topologically Sorted Source Nodes: [input_32, input_33], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf35, primals_31, 32768, grid=grid(32768), stream=stream0)
        del primals_31
        # Topologically Sorted Source Nodes: [input_34], Original ATen: [aten.convolution]
        buf36 = extern_kernels.convolution(buf35, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf36, (4, 512, 8, 8), (32768, 64, 8, 1))
        buf37 = buf36; del buf36 # reuse
        # Topologically Sorted Source Nodes: [input_34, input_35], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_6.run(buf37, primals_33, 131072, grid=grid(131072), stream=stream0)
        del primals_33
        # Topologically Sorted Source Nodes: [input_36], Original ATen: [aten.convolution]
        buf38 = extern_kernels.convolution(buf37, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf38, (4, 38, 8, 8), (2432, 64, 8, 1))
        # Topologically Sorted Source Nodes: [input_37], Original ATen: [aten.convolution]
        buf39 = extern_kernels.convolution(buf29, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf39, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf40 = buf39; del buf39 # reuse
        # Topologically Sorted Source Nodes: [input_37, input_38], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf40, primals_37, 32768, grid=grid(32768), stream=stream0)
        del primals_37
        # Topologically Sorted Source Nodes: [input_39], Original ATen: [aten.convolution]
        buf41 = extern_kernels.convolution(buf40, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf41, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf42 = buf41; del buf41 # reuse
        # Topologically Sorted Source Nodes: [input_39, input_40], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf42, primals_39, 32768, grid=grid(32768), stream=stream0)
        del primals_39
        # Topologically Sorted Source Nodes: [input_41], Original ATen: [aten.convolution]
        buf43 = extern_kernels.convolution(buf42, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf44 = buf43; del buf43 # reuse
        # Topologically Sorted Source Nodes: [input_41, input_42], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf44, primals_41, 32768, grid=grid(32768), stream=stream0)
        del primals_41
        # Topologically Sorted Source Nodes: [input_43], Original ATen: [aten.convolution]
        buf45 = extern_kernels.convolution(buf44, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf45, (4, 512, 8, 8), (32768, 64, 8, 1))
        buf46 = buf45; del buf45 # reuse
        # Topologically Sorted Source Nodes: [input_43, input_44], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_6.run(buf46, primals_43, 131072, grid=grid(131072), stream=stream0)
        del primals_43
        # Topologically Sorted Source Nodes: [input_45], Original ATen: [aten.convolution]
        buf47 = extern_kernels.convolution(buf46, primals_44, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf47, (4, 19, 8, 8), (1216, 64, 8, 1))
        buf48 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out2], Original ATen: [aten.cat]
        triton_poi_fused_cat_9.run(buf38, primals_35, buf47, primals_45, buf29, buf48, 47360, grid=grid(47360), stream=stream0)
        del buf38
        del buf47
        del primals_35
        del primals_45
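        # Editor's note (descriptive only): judging from its arguments,
        # triton_poi_fused_cat_9 fuses the two pending bias-adds with the
        # concatenation, i.e. roughly
        # torch.cat([buf38 + bias38, buf47 + bias19, buf29], dim=1),
        # giving 38 + 19 + 128 = 185 channels; that matches buf48's
        # (4, 185, 8, 8) shape, the element count 47360 = 4 * 185 * 64, and
        # the 185-input-channel 7x7 weights of the next stage.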
        # Topologically Sorted Source Nodes: [input_46], Original ATen: [aten.convolution]
        buf49 = extern_kernels.convolution(buf48, primals_46, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf49, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf50 = buf49; del buf49 # reuse
        # Topologically Sorted Source Nodes: [input_46, input_47], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf50, primals_47, 32768, grid=grid(32768), stream=stream0)
        del primals_47
        # Topologically Sorted Source Nodes: [input_48], Original ATen: [aten.convolution]
        buf51 = extern_kernels.convolution(buf50, primals_48, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf51, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf52 = buf51; del buf51 # reuse
        # Topologically Sorted Source Nodes: [input_48, input_49], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf52, primals_49, 32768, grid=grid(32768), stream=stream0)
        del primals_49
        # Topologically Sorted Source Nodes: [input_50], Original ATen: [aten.convolution]
        buf53 = extern_kernels.convolution(buf52, primals_50, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf53, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf54 = buf53; del buf53 # reuse
        # Topologically Sorted Source Nodes: [input_50, input_51], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf54, primals_51, 32768, grid=grid(32768), stream=stream0)
        del primals_51
        # Topologically Sorted Source Nodes: [input_52], Original ATen: [aten.convolution]
        buf55 = extern_kernels.convolution(buf54, primals_52, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf55, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf56 = buf55; del buf55 # reuse
        # Topologically Sorted Source Nodes: [input_52, input_53], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf56, primals_53, 32768, grid=grid(32768), stream=stream0)
        del primals_53
        # Topologically Sorted Source Nodes: [input_54], Original ATen: [aten.convolution]
        buf57 = extern_kernels.convolution(buf56, primals_54, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf57, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf58 = buf57; del buf57 # reuse
        # Topologically Sorted Source Nodes: [input_54, input_55], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf58, primals_55, 32768, grid=grid(32768), stream=stream0)
        del primals_55
        # Topologically Sorted Source Nodes: [input_56], Original ATen: [aten.convolution]
        buf59 = extern_kernels.convolution(buf58, primals_56, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf59, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf60 = buf59; del buf59 # reuse
        # Topologically Sorted Source Nodes: [input_56, input_57], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf60, primals_57, 32768, grid=grid(32768), stream=stream0)
        del primals_57
        # Topologically Sorted Source Nodes: [input_58], Original ATen: [aten.convolution]
        buf61 = extern_kernels.convolution(buf60, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf61, (4, 38, 8, 8), (2432, 64, 8, 1))
        # Topologically Sorted Source Nodes: [input_59], Original ATen: [aten.convolution]
        buf62 = extern_kernels.convolution(buf48, primals_60, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf62, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf63 = buf62; del buf62 # reuse
        # Topologically Sorted Source Nodes: [input_59, input_60], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf63, primals_61, 32768, grid=grid(32768), stream=stream0)
        del primals_61
        # Topologically Sorted Source Nodes: [input_61], Original ATen: [aten.convolution]
        buf64 = extern_kernels.convolution(buf63, primals_62, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf64, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf65 = buf64; del buf64 # reuse
        # Topologically Sorted Source Nodes: [input_61, input_62], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf65, primals_63, 32768, grid=grid(32768), stream=stream0)
        del primals_63
        # Topologically Sorted Source Nodes: [input_63], Original ATen: [aten.convolution]
        buf66 = extern_kernels.convolution(buf65, primals_64, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf66, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf67 = buf66; del buf66 # reuse
        # Topologically Sorted Source Nodes: [input_63, input_64], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf67, primals_65, 32768, grid=grid(32768), stream=stream0)
        del primals_65
        # Topologically Sorted Source Nodes: [input_65], Original ATen: [aten.convolution]
        buf68 = extern_kernels.convolution(buf67, primals_66, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf68, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf69 = buf68; del buf68 # reuse
        # Topologically Sorted Source Nodes: [input_65, input_66], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf69, primals_67, 32768, grid=grid(32768), stream=stream0)
        del primals_67
        # Topologically Sorted Source Nodes: [input_67], Original ATen: [aten.convolution]
        buf70 = extern_kernels.convolution(buf69, primals_68, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf70, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf71 = buf70; del buf70 # reuse
        # Topologically Sorted Source Nodes: [input_67, input_68], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf71, primals_69, 32768, grid=grid(32768), stream=stream0)
        del primals_69
        # Topologically Sorted Source Nodes: [input_69], Original ATen: [aten.convolution]
        buf72 = extern_kernels.convolution(buf71, primals_70, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf72, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf73 = buf72; del buf72 # reuse
        # Topologically Sorted Source Nodes: [input_69, input_70], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf73, primals_71, 32768, grid=grid(32768), stream=stream0)
        del primals_71
        # Topologically Sorted Source Nodes: [input_71], Original ATen: [aten.convolution]
        buf74 = extern_kernels.convolution(buf73, primals_72, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf74, (4, 19, 8, 8), (1216, 64, 8, 1))
        buf75 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out3], Original ATen: [aten.cat]
        triton_poi_fused_cat_9.run(buf61, primals_59, buf74, primals_73, buf29, buf75, 47360, grid=grid(47360), stream=stream0)
        del buf61
        del buf74
        del primals_59
        del primals_73
        # Topologically Sorted Source Nodes: [input_72], Original ATen: [aten.convolution]
        buf76 = extern_kernels.convolution(buf75, primals_74, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf76, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf77 = buf76; del buf76 # reuse
        # Topologically Sorted Source Nodes: [input_72, input_73], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf77, primals_75, 32768, grid=grid(32768), stream=stream0)
        del primals_75
        # Topologically Sorted Source Nodes: [input_74], Original ATen: [aten.convolution]
        buf78 = extern_kernels.convolution(buf77, primals_76, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf79 = buf78; del buf78 # reuse
        # Topologically Sorted Source Nodes: [input_74, input_75], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf79, primals_77, 32768, grid=grid(32768), stream=stream0)
        del primals_77
        # Topologically Sorted Source Nodes: [input_76], Original ATen: [aten.convolution]
        buf80 = extern_kernels.convolution(buf79, primals_78, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf80, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf81 = buf80; del buf80 # reuse
        # Topologically Sorted Source Nodes: [input_76, input_77], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf81, primals_79, 32768, grid=grid(32768), stream=stream0)
        del primals_79
        # Topologically Sorted Source Nodes: [input_78], Original ATen: [aten.convolution]
        buf82 = extern_kernels.convolution(buf81, primals_80, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf82, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf83 = buf82; del buf82 # reuse
        # Topologically Sorted Source Nodes: [input_78, input_79], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf83, primals_81, 32768, grid=grid(32768), stream=stream0)
        del primals_81
        # Topologically Sorted Source Nodes: [input_80], Original ATen: [aten.convolution]
        buf84 = extern_kernels.convolution(buf83, primals_82, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf84, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf85 = buf84; del buf84 # reuse
        # Topologically Sorted Source Nodes: [input_80, input_81], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf85, primals_83, 32768, grid=grid(32768), stream=stream0)
        del primals_83
        # Topologically Sorted Source Nodes: [input_82], Original ATen: [aten.convolution]
        buf86 = extern_kernels.convolution(buf85, primals_84, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf86, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf87 = buf86; del buf86 # reuse
        # Topologically Sorted Source Nodes: [input_82, input_83], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf87, primals_85, 32768, grid=grid(32768), stream=stream0)
        del primals_85
        # Topologically Sorted Source Nodes: [input_84], Original ATen: [aten.convolution]
        buf88 = extern_kernels.convolution(buf87, primals_86, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf88, (4, 38, 8, 8), (2432, 64, 8, 1))
        # Topologically Sorted Source Nodes: [input_85], Original ATen: [aten.convolution]
        buf89 = extern_kernels.convolution(buf75, primals_88, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf89, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf90 = buf89; del buf89 # reuse
        # Topologically Sorted Source Nodes: [input_85, input_86], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf90, primals_89, 32768, grid=grid(32768), stream=stream0)
        del primals_89
        # Topologically Sorted Source Nodes: [input_87], Original ATen: [aten.convolution]
        buf91 = extern_kernels.convolution(buf90, primals_90, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf91, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf92 = buf91; del buf91 # reuse
        # Topologically Sorted Source Nodes: [input_87, input_88], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf92, primals_91, 32768, grid=grid(32768), stream=stream0)
        del primals_91
        # Topologically Sorted Source Nodes: [input_89], Original ATen: [aten.convolution]
        buf93 = extern_kernels.convolution(buf92, primals_92, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf93, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf94 = buf93; del buf93 # reuse
        # Topologically Sorted Source Nodes: [input_89, input_90], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf94, primals_93, 32768, grid=grid(32768), stream=stream0)
        del primals_93
        # Topologically Sorted Source Nodes: [input_91], Original ATen: [aten.convolution]
        buf95 = extern_kernels.convolution(buf94, primals_94, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf95, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf96 = buf95; del buf95 # reuse
        # Topologically Sorted Source Nodes: [input_91, input_92], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf96, primals_95, 32768, grid=grid(32768), stream=stream0)
        del primals_95
        # Topologically Sorted Source Nodes: [input_93], Original ATen: [aten.convolution]
        buf97 = extern_kernels.convolution(buf96, primals_96, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf97, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf98 = buf97; del buf97 # reuse
        # Topologically Sorted Source Nodes: [input_93, input_94], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf98, primals_97, 32768, grid=grid(32768), stream=stream0)
        del primals_97
        # Topologically Sorted Source Nodes: [input_95], Original ATen: [aten.convolution]
        buf99 = extern_kernels.convolution(buf98, primals_98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf99, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf100 = buf99; del buf99 # reuse
        # Topologically Sorted Source Nodes: [input_95, input_96], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf100, primals_99, 32768, grid=grid(32768), stream=stream0)
        del primals_99
        # Topologically Sorted Source Nodes: [input_97], Original ATen: [aten.convolution]
        buf101 = extern_kernels.convolution(buf100, primals_100, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf101, (4, 19, 8, 8), (1216, 64, 8, 1))
        buf102 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out4], Original ATen: [aten.cat]
        triton_poi_fused_cat_9.run(buf88, primals_87, buf101, primals_101, buf29, buf102, 47360, grid=grid(47360), stream=stream0)
        del buf101
        del buf88
        del primals_101
        del primals_87
        # Topologically Sorted Source Nodes: [input_98], Original ATen: [aten.convolution]
        buf103 = extern_kernels.convolution(buf102, primals_102, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf103, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf104 = buf103; del buf103 # reuse
        # Topologically Sorted Source Nodes: [input_98, input_99], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf104, primals_103, 32768, grid=grid(32768), stream=stream0)
        del primals_103
        # Topologically Sorted Source Nodes: [input_100], Original ATen: [aten.convolution]
        buf105 = extern_kernels.convolution(buf104, primals_104, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf105, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf106 = buf105; del buf105 # reuse
        # Topologically Sorted Source Nodes: [input_100, input_101], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf106, primals_105, 32768, grid=grid(32768), stream=stream0)
        del primals_105
        # Topologically Sorted Source Nodes: [input_102], Original ATen: [aten.convolution]
        buf107 = extern_kernels.convolution(buf106, primals_106, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf107, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf108 = buf107; del buf107 # reuse
        # Topologically Sorted Source Nodes: [input_102, input_103], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf108, primals_107, 32768, grid=grid(32768), stream=stream0)
        del primals_107
        # Topologically Sorted Source Nodes: [input_104], Original ATen: [aten.convolution]
        buf109 = extern_kernels.convolution(buf108, primals_108, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf109, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf110 = buf109; del buf109 # reuse
        # Topologically Sorted Source Nodes: [input_104, input_105], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf110, primals_109, 32768, grid=grid(32768), stream=stream0)
        del primals_109
        # Topologically Sorted Source Nodes: [input_106], Original ATen: [aten.convolution]
        buf111 = extern_kernels.convolution(buf110, primals_110, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf111, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf112 = buf111; del buf111 # reuse
        # Topologically Sorted Source Nodes: [input_106, input_107], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf112, primals_111, 32768, grid=grid(32768), stream=stream0)
        del primals_111
        # Topologically Sorted Source Nodes: [input_108], Original ATen: [aten.convolution]
        buf113 = extern_kernels.convolution(buf112, primals_112, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf113, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf114 = buf113; del buf113 # reuse
        # Topologically Sorted Source Nodes: [input_108, input_109], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf114, primals_113, 32768, grid=grid(32768), stream=stream0)
        del primals_113
        # Topologically Sorted Source Nodes: [input_110], Original ATen: [aten.convolution]
        buf115 = extern_kernels.convolution(buf114, primals_114, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf115, (4, 38, 8, 8), (2432, 64, 8, 1))
        # Topologically Sorted Source Nodes: [input_111], Original ATen: [aten.convolution]
        buf116 = extern_kernels.convolution(buf102, primals_116, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf116, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf117 = buf116; del buf116 # reuse
        # Topologically Sorted Source Nodes: [input_111, input_112], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf117, primals_117, 32768, grid=grid(32768), stream=stream0)
        del primals_117
        # Topologically Sorted Source Nodes: [input_113], Original ATen: [aten.convolution]
        buf118 = extern_kernels.convolution(buf117, primals_118, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf118, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf119 = buf118; del buf118 # reuse
        # Topologically Sorted Source Nodes: [input_113, input_114], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf119, primals_119, 32768, grid=grid(32768), stream=stream0)
        del primals_119
        # Topologically Sorted Source Nodes: [input_115], Original ATen: [aten.convolution]
        buf120 = extern_kernels.convolution(buf119, primals_120, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf120, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf121 = buf120; del buf120 # reuse
        # Topologically Sorted Source Nodes: [input_115, input_116], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf121, primals_121, 32768, grid=grid(32768), stream=stream0)
        del primals_121
        # Topologically Sorted Source Nodes: [input_117], Original ATen: [aten.convolution]
        buf122 = extern_kernels.convolution(buf121, primals_122, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf122, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf123 = buf122; del buf122 # reuse
        # Topologically Sorted Source Nodes: [input_117, input_118], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf123, primals_123, 32768, grid=grid(32768), stream=stream0)
        del primals_123
        # Topologically Sorted Source Nodes: [input_119], Original ATen: [aten.convolution]
        buf124 = extern_kernels.convolution(buf123, primals_124, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf124, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf125 = buf124; del buf124 # reuse
        # Topologically Sorted Source Nodes: [input_119, input_120], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf125, primals_125, 32768, grid=grid(32768), stream=stream0)
        del primals_125
        # Topologically Sorted Source Nodes: [input_121], Original ATen: [aten.convolution]
        buf126 = extern_kernels.convolution(buf125, primals_126, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf126, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf127 = buf126; del buf126 # reuse
        # Topologically Sorted Source Nodes: [input_121, input_122], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf127, primals_127, 32768, grid=grid(32768), stream=stream0)
        del primals_127
        # Topologically Sorted Source Nodes: [input_123], Original ATen: [aten.convolution]
        buf128 = extern_kernels.convolution(buf127, primals_128, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf128, (4, 19, 8, 8), (1216, 64, 8, 1))
        buf129 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out5], Original ATen: [aten.cat]
        triton_poi_fused_cat_9.run(buf115, primals_115, buf128, primals_129, buf29, buf129, 47360, grid=grid(47360), stream=stream0)
        del buf115
        del buf128
        del primals_115
        del primals_129
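        # Editor's note (descriptive only): out3 through out6 above and below
        # repeat one refinement-stage template -- five 7x7 convolutions, a 1x1
        # mixing convolution, parallel 38- and 19-channel 1x1 heads, then a
        # concatenation with buf29 -- so from stage to stage only the weight
        # and buffer names change.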
        # Topologically Sorted Source Nodes: [input_124], Original ATen: [aten.convolution]
        buf130 = extern_kernels.convolution(buf129, primals_130, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf130, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf131 = buf130; del buf130 # reuse
        # Topologically Sorted Source Nodes: [input_124, input_125], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf131, primals_131, 32768, grid=grid(32768), stream=stream0)
        del primals_131
        # Topologically Sorted Source Nodes: [input_126], Original ATen: [aten.convolution]
        buf132 = extern_kernels.convolution(buf131, primals_132, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf132, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf133 = buf132; del buf132 # reuse
        # Topologically Sorted Source Nodes: [input_126, input_127], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf133, primals_133, 32768, grid=grid(32768), stream=stream0)
        del primals_133
        # Topologically Sorted Source Nodes: [input_128], Original ATen: [aten.convolution]
        buf134 = extern_kernels.convolution(buf133, primals_134, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf134, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf135 = buf134; del buf134 # reuse
        # Topologically Sorted Source Nodes: [input_128, input_129], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf135, primals_135, 32768, grid=grid(32768), stream=stream0)
        del primals_135
        # Topologically Sorted Source Nodes: [input_130], Original ATen: [aten.convolution]
        buf136 = extern_kernels.convolution(buf135, primals_136, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf136, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf137 = buf136; del buf136 # reuse
        # Topologically Sorted Source Nodes: [input_130, input_131], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf137, primals_137, 32768, grid=grid(32768), stream=stream0)
        del primals_137
        # Topologically Sorted Source Nodes: [input_132], Original ATen: [aten.convolution]
        buf138 = extern_kernels.convolution(buf137, primals_138, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf138, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf139 = buf138; del buf138 # reuse
        # Topologically Sorted Source Nodes: [input_132, input_133], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf139, primals_139, 32768, grid=grid(32768), stream=stream0)
        del primals_139
        # Topologically Sorted Source Nodes: [input_134], Original ATen: [aten.convolution]
        buf140 = extern_kernels.convolution(buf139, primals_140, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf140, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf141 = buf140; del buf140 # reuse
        # Topologically Sorted Source Nodes: [input_134, input_135], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf141, primals_141, 32768, grid=grid(32768), stream=stream0)
        del primals_141
        # Topologically Sorted Source Nodes: [input_136], Original ATen: [aten.convolution]
        buf142 = extern_kernels.convolution(buf141, primals_142, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf142, (4, 38, 8, 8), (2432, 64, 8, 1))
        # Topologically Sorted Source Nodes: [input_137], Original ATen: [aten.convolution]
        buf143 = extern_kernels.convolution(buf129, primals_144, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf143, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf144 = buf143; del buf143 # reuse
        # Topologically Sorted Source Nodes: [input_137, input_138], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf144, primals_145, 32768, grid=grid(32768), stream=stream0)
        del primals_145
        # Topologically Sorted Source Nodes: [input_139], Original ATen: [aten.convolution]
        buf145 = extern_kernels.convolution(buf144, primals_146, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf145, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf146 = buf145; del buf145 # reuse
        # Topologically Sorted Source Nodes: [input_139, input_140], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf146, primals_147, 32768, grid=grid(32768), stream=stream0)
        del primals_147
        # Topologically Sorted Source Nodes: [input_141], Original ATen: [aten.convolution]
        buf147 = extern_kernels.convolution(buf146, primals_148, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf147, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf148 = buf147; del buf147 # reuse
        # Topologically Sorted Source Nodes: [input_141, input_142], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf148, primals_149, 32768, grid=grid(32768), stream=stream0)
        del primals_149
        # Topologically Sorted Source Nodes: [input_143], Original ATen: [aten.convolution]
        buf149 = extern_kernels.convolution(buf148, primals_150, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf149, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf150 = buf149; del buf149 # reuse
        # Topologically Sorted Source Nodes: [input_143, input_144], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf150, primals_151, 32768, grid=grid(32768), stream=stream0)
        del primals_151
        # Topologically Sorted Source Nodes: [input_145], Original ATen: [aten.convolution]
        buf151 = extern_kernels.convolution(buf150, primals_152, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf151, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf152 = buf151; del buf151 # reuse
        # Topologically Sorted Source Nodes: [input_145, input_146], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf152, primals_153, 32768, grid=grid(32768), stream=stream0)
        del primals_153
        # Topologically Sorted Source Nodes: [input_147], Original ATen: [aten.convolution]
        buf153 = extern_kernels.convolution(buf152, primals_154, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf153, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf154 = buf153; del buf153 # reuse
        # Topologically Sorted Source Nodes: [input_147, input_148], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf154, primals_155, 32768, grid=grid(32768), stream=stream0)
        del primals_155
        # Topologically Sorted Source Nodes: [input_149], Original ATen: [aten.convolution]
        buf155 = extern_kernels.convolution(buf154, primals_156, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf155, (4, 19, 8, 8), (1216, 64, 8, 1))
        buf156 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out6], Original ATen: [aten.cat]
        triton_poi_fused_cat_9.run(buf142, primals_143, buf155, primals_157, buf29, buf156, 47360, grid=grid(47360), stream=stream0)
        del buf142
        del buf155
        del primals_143
        del primals_157
        # Topologically Sorted Source Nodes: [input_150], Original ATen: [aten.convolution]
        buf157 = extern_kernels.convolution(buf156, primals_158, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf157, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf158 = buf157; del buf157 # reuse
        # Topologically Sorted Source Nodes: [input_150, input_151], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf158, primals_159, 32768, grid=grid(32768), stream=stream0)
        del primals_159
        # Topologically Sorted Source Nodes: [input_152], Original ATen: [aten.convolution]
        buf159 = extern_kernels.convolution(buf158, primals_160, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf159, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf160 = buf159; del buf159 # reuse
        # Topologically Sorted Source Nodes: [input_152, input_153], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf160, primals_161, 32768, grid=grid(32768), stream=stream0)
        del primals_161
        # Topologically Sorted Source Nodes: [input_154], Original ATen: [aten.convolution]
        buf161 = extern_kernels.convolution(buf160, primals_162, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf161, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf162 = buf161; del buf161 # reuse
        # Topologically Sorted Source Nodes: [input_154, input_155], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf162, primals_163, 32768, grid=grid(32768), stream=stream0)
        del primals_163
        # Topologically Sorted Source Nodes: [input_156], Original ATen: [aten.convolution]
        buf163 = extern_kernels.convolution(buf162, primals_164, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf163, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf164 = buf163; del buf163 # reuse
        # Topologically Sorted Source Nodes: [input_156, input_157], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf164, primals_165, 32768, grid=grid(32768), stream=stream0)
        del primals_165
        # Topologically Sorted Source Nodes: [input_158], Original ATen: [aten.convolution]
        buf165 = extern_kernels.convolution(buf164, primals_166, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf165, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf166 = buf165; del buf165 # reuse
        # Topologically Sorted Source Nodes: [input_158, input_159], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf166, primals_167, 32768, grid=grid(32768), stream=stream0)
        del primals_167
        # Topologically Sorted Source Nodes: [input_160], Original ATen: [aten.convolution]
        buf167 = extern_kernels.convolution(buf166, primals_168, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf167, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf168 = buf167; del buf167 # reuse
        # Topologically Sorted Source Nodes: [input_160, input_161], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf168, primals_169, 32768, grid=grid(32768), stream=stream0)
        del primals_169
        # Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
        buf169 = extern_kernels.convolution(buf168, primals_170, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf169, (4, 38, 8, 8), (2432, 64, 8, 1))
        buf170 = buf169; del buf169 # reuse
        # Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_10.run(buf170, primals_171, 9728, grid=grid(9728), stream=stream0)
        del primals_171
        # Topologically Sorted Source Nodes: [input_163], Original ATen: [aten.convolution]
        buf171 = extern_kernels.convolution(buf156, primals_172, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf171, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf172 = buf171; del buf171 # reuse
        # Topologically Sorted Source Nodes: [input_163, input_164], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf172, primals_173, 32768, grid=grid(32768), stream=stream0)
        del primals_173
        # Topologically Sorted Source Nodes: [input_165], Original ATen: [aten.convolution]
        buf173 = extern_kernels.convolution(buf172, primals_174, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf173, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf174 = buf173; del buf173 # reuse
        # Topologically Sorted Source Nodes: [input_165, input_166], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf174, primals_175, 32768, grid=grid(32768), stream=stream0)
        del primals_175
        # Topologically Sorted Source Nodes: [input_167], Original ATen: [aten.convolution]
        buf175 = extern_kernels.convolution(buf174, primals_176, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf175, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf176 = buf175; del buf175 # reuse
        # Topologically Sorted Source Nodes: [input_167, input_168], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf176, primals_177, 32768, grid=grid(32768), stream=stream0)
        del primals_177
        # Topologically Sorted Source Nodes: [input_169], Original ATen: [aten.convolution]
        buf177 = extern_kernels.convolution(buf176, primals_178, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf177, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf178 = buf177; del buf177 # reuse
        # Topologically Sorted Source Nodes: [input_169, input_170], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf178, primals_179, 32768, grid=grid(32768), stream=stream0)
        del primals_179
        # Topologically Sorted Source Nodes: [input_171], Original ATen: [aten.convolution]
        buf179 = extern_kernels.convolution(buf178, primals_180, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf179, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf180 = buf179; del buf179 # reuse
        # Topologically Sorted Source Nodes: [input_171, input_172], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf180, primals_181, 32768, grid=grid(32768), stream=stream0)
        del primals_181
        # Topologically Sorted Source Nodes: [input_173], Original ATen: [aten.convolution]
        buf181 = extern_kernels.convolution(buf180, primals_182, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf181, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf182 = buf181; del buf181 # reuse
        # Topologically Sorted Source Nodes: [input_173, input_174], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf182, primals_183, 32768, grid=grid(32768), stream=stream0)
        del primals_183
        # Topologically Sorted Source Nodes: [input_175], Original ATen: [aten.convolution]
        buf183 = extern_kernels.convolution(buf182, primals_184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf183, (4, 19, 8, 8), (1216, 64, 8, 1))
        buf184 = buf183; del buf183 # reuse
        buf185 = empty_strided_cuda((4, 19, 8, 8), (1280, 64, 8, 1), torch.bool)
        # Topologically Sorted Source Nodes: [input_175, input_176], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
        triton_poi_fused_convolution_relu_threshold_backward_11.run(buf184, primals_185, buf185, 4864, grid=grid(4864), stream=stream0)
        del primals_185
    return (buf170, buf184, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, primals_54, primals_56, primals_58, primals_60, primals_62, primals_64, primals_66, primals_68, primals_70, primals_72, primals_74, primals_76, primals_78, primals_80, primals_82, primals_84, primals_86, primals_88, primals_90, primals_92, primals_94, primals_96, primals_98, primals_100, primals_102, primals_104, primals_106, primals_108, primals_110, primals_112, primals_114, primals_116, primals_118, primals_120, primals_122, primals_124, primals_126, primals_128, primals_130, primals_132, primals_134, primals_136, primals_138, primals_140, primals_142, primals_144, primals_146, primals_148, primals_150, primals_152, primals_154, primals_156, primals_158, primals_160, primals_162, primals_164, primals_166, primals_168, primals_170, primals_172, primals_174, primals_176, primals_178, primals_180, primals_182, primals_184, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf63, buf65, buf67, buf69, buf71, buf73, buf75, buf77, buf79, buf81, buf83, buf85, buf87, buf90, buf92, buf94, buf96, buf98, buf100, buf102, buf104, buf106, buf108, buf110, buf112, buf114, buf117, buf119, buf121, buf123, buf125, buf127, buf129, buf131, buf133, buf135, buf137, buf139, buf141, buf144, buf146, buf148, buf150, buf152, buf154, buf156, buf158, buf160, buf162, buf164, buf166, buf168, buf172, buf174, buf176, buf178, buf180, buf182, buf185, )
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((512, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((38, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) 
primals_39 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((512, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((19, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_48 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_49 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_50 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_51 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_52 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_53 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_54 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_55 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_56 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_57 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_58 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_59 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32) primals_60 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_61 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_62 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_63 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_64 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_65 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_66 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_67 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_68 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_69 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_70 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_71 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_72 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_73 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32) primals_74 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_75 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_76 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_77 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_78 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), 
device='cuda:0', dtype=torch.float32) primals_79 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_80 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_81 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_82 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_83 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_84 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_85 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_86 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_87 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32) primals_88 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_89 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_90 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_91 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_92 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_93 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_94 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_95 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_96 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_97 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_98 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_99 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_100 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_101 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32) primals_102 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_103 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_104 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_105 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_106 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_107 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_108 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_109 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_110 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_111 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_112 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_113 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_114 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_115 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32) primals_116 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_117 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_118 = 
rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_119 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_120 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_121 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_122 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_123 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_124 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_125 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_126 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_127 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_128 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_129 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32) primals_130 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_131 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_132 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_133 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_134 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_135 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_136 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_137 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_138 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_139 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_140 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_141 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_142 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_143 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32) primals_144 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_145 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_146 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_147 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_148 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_149 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_150 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_151 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_152 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_153 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_154 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_155 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_156 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_157 = 
rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32) primals_158 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_159 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_160 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_161 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_162 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_163 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_164 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_165 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_166 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_167 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_168 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_169 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_170 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_171 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32) primals_172 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_173 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_174 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_175 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_176 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_177 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_178 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_179 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_180 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_181 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_182 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_183 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_184 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_185 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, 
primals_79, primals_80, primals_81, primals_82, primals_83, primals_84,
        primals_85, primals_86, primals_87, primals_88, primals_89,
        primals_90, primals_91, primals_92, primals_93, primals_94,
        primals_95, primals_96, primals_97, primals_98, primals_99,
        primals_100, primals_101, primals_102, primals_103, primals_104,
        primals_105, primals_106, primals_107, primals_108, primals_109,
        primals_110, primals_111, primals_112, primals_113, primals_114,
        primals_115, primals_116, primals_117, primals_118, primals_119,
        primals_120, primals_121, primals_122, primals_123, primals_124,
        primals_125, primals_126, primals_127, primals_128, primals_129,
        primals_130, primals_131, primals_132, primals_133, primals_134,
        primals_135, primals_136, primals_137, primals_138, primals_139,
        primals_140, primals_141, primals_142, primals_143, primals_144,
        primals_145, primals_146, primals_147, primals_148, primals_149,
        primals_150, primals_151, primals_152, primals_153, primals_154,
        primals_155, primals_156, primals_157, primals_158, primals_159,
        primals_160, primals_161, primals_162, primals_163, primals_164,
        primals_165, primals_166, primals_167, primals_168, primals_169,
        primals_170, primals_171, primals_172, primals_173, primals_174,
        primals_175, primals_176, primals_177, primals_178, primals_179,
        primals_180, primals_181, primals_182, primals_183, primals_184,
        primals_185])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
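# A minimal sketch (hypothetical helper, not emitted by Inductor): drive the
# same timing loop directly, e.g. from a REPL, instead of via the __main__
# hook. benchmark_compiled_module builds its own random weights and a random
# (4, 3, 64, 64) input with rand_strided, so no OpenPose checkpoint is
# needed; a CUDA device is required since every tensor targets cuda:0.
def _smoke_benchmark():
    import torch
    if not torch.cuda.is_available():
        raise RuntimeError('the compiled module targets cuda:0')
    # print_performance both prints and returns the measured time per call
    return benchmark_compiled_module(times=10, repeat=10)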
import torch
from collections import OrderedDict
import torch.nn as nn


def make_layers(block, no_relu_layers):
    layers = []
    for layer_name, v in block.items():
        if 'pool' in layer_name:
            layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
            layers.append((layer_name, layer))
        else:
            conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
                kernel_size=v[2], stride=v[3], padding=v[4])
            layers.append((layer_name, conv2d))
            if layer_name not in no_relu_layers:
                layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))
    return nn.Sequential(OrderedDict(layers))


class bodypose_model(nn.Module):

    def __init__(self):
        super(bodypose_model, self).__init__()
        # NOTE: 'Mconv7_stage6_L1' appears twice below; the last entry was
        # presumably meant to be 'Mconv7_stage6_L2'. It is kept as-is so the
        # eager model matches the compiled graph above, which applies a ReLU
        # to the stage-6 L2 output (buf184/buf185) but not to the stage-6 L1
        # output (buf170).
        no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2',
            'Mconv7_stage2_L1', 'Mconv7_stage2_L2', 'Mconv7_stage3_L1',
            'Mconv7_stage3_L2', 'Mconv7_stage4_L1', 'Mconv7_stage4_L2',
            'Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'Mconv7_stage6_L1',
            'Mconv7_stage6_L1']
        blocks = {}
        block0 = OrderedDict({'conv1_1': [3, 64, 3, 1, 1], 'conv1_2': [64,
            64, 3, 1, 1], 'pool1_stage1': [2, 2, 0], 'conv2_1': [64, 128,
            3, 1, 1], 'conv2_2': [128, 128, 3, 1, 1], 'pool2_stage1': [2,
            2, 0], 'conv3_1': [128, 256, 3, 1, 1], 'conv3_2': [256, 256,
            3, 1, 1], 'conv3_3': [256, 256, 3, 1, 1], 'conv3_4': [256,
            256, 3, 1, 1], 'pool3_stage1': [2, 2, 0], 'conv4_1': [256,
            512, 3, 1, 1], 'conv4_2': [512, 512, 3, 1, 1], 'conv4_3_CPM':
            [512, 256, 3, 1, 1], 'conv4_4_CPM': [256, 128, 3, 1, 1]})
        block1_1 = OrderedDict({'conv5_1_CPM_L1': [128, 128, 3, 1, 1],
            'conv5_2_CPM_L1': [128, 128, 3, 1, 1], 'conv5_3_CPM_L1': [128,
            128, 3, 1, 1], 'conv5_4_CPM_L1': [128, 512, 1, 1, 0],
            'conv5_5_CPM_L1': [512, 38, 1, 1, 0]})
        block1_2 = OrderedDict({'conv5_1_CPM_L2': [128, 128, 3, 1, 1],
            'conv5_2_CPM_L2': [128, 128, 3, 1, 1], 'conv5_3_CPM_L2': [128,
            128, 3, 1, 1], 'conv5_4_CPM_L2': [128, 512, 1, 1, 0],
            'conv5_5_CPM_L2': [512, 19, 1, 1, 0]})
        blocks['block1_1'] = block1_1
        blocks['block1_2'] = block1_2
        self.model0 = make_layers(block0, no_relu_layers)
        for i in range(2, 7):
            blocks['block%d_1' % i] = OrderedDict({('Mconv1_stage%d_L1' %
                i): [185, 128, 7, 1, 3], ('Mconv2_stage%d_L1' % i): [128,
                128, 7, 1, 3], ('Mconv3_stage%d_L1' % i): [128, 128, 7, 1,
                3], ('Mconv4_stage%d_L1' % i): [128, 128, 7, 1, 3], (
                'Mconv5_stage%d_L1' % i): [128, 128, 7, 1, 3], (
                'Mconv6_stage%d_L1' % i): [128, 128, 1, 1, 0], (
                'Mconv7_stage%d_L1' % i): [128, 38, 1, 1, 0]})
            blocks['block%d_2' % i] = OrderedDict({('Mconv1_stage%d_L2' %
                i): [185, 128, 7, 1, 3], ('Mconv2_stage%d_L2' % i): [128,
                128, 7, 1, 3], ('Mconv3_stage%d_L2' % i): [128, 128, 7, 1,
                3], ('Mconv4_stage%d_L2' % i): [128, 128, 7, 1, 3], (
                'Mconv5_stage%d_L2' % i): [128, 128, 7, 1, 3], (
                'Mconv6_stage%d_L2' % i): [128, 128, 1, 1, 0], (
                'Mconv7_stage%d_L2' % i): [128, 19, 1, 1, 0]})
        for k in blocks.keys():
            blocks[k] = make_layers(blocks[k], no_relu_layers)
        self.model1_1 = blocks['block1_1']
        self.model2_1 = blocks['block2_1']
        self.model3_1 = blocks['block3_1']
        self.model4_1 = blocks['block4_1']
        self.model5_1 = blocks['block5_1']
        self.model6_1 = blocks['block6_1']
        self.model1_2 = blocks['block1_2']
        self.model2_2 = blocks['block2_2']
        self.model3_2 = blocks['block3_2']
        self.model4_2 = blocks['block4_2']
        self.model5_2 = blocks['block5_2']
        self.model6_2 = blocks['block6_2']

    def forward(self, x):
        out1 = self.model0(x)
        out1_1 = self.model1_1(out1)
        out1_2 = self.model1_2(out1)
        out2 = torch.cat([out1_1, out1_2, out1], 1)
        out2_1 = self.model2_1(out2)
        out2_2 = self.model2_2(out2)
        out3 = torch.cat([out2_1, out2_2, out1], 1)
        out3_1 = self.model3_1(out3)
        out3_2 = self.model3_2(out3)
        out4 = torch.cat([out3_1, out3_2, out1], 1)
        out4_1 = self.model4_1(out4)
        out4_2 = self.model4_2(out4)
        out5 = torch.cat([out4_1, out4_2, out1], 1)
        out5_1 = self.model5_1(out5)
        out5_2 = self.model5_2(out5)
        out6 = torch.cat([out5_1, out5_2, out1], 1)
        out6_1 = self.model6_1(out6)
        out6_2 = self.model6_2(out6)
        return out6_1, out6_2


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
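# A minimal usage sketch (hypothetical helper, not part of the original
# module): run the eager model once and confirm the two branch outputs.
# Three stride-2 max-pools shrink the 64x64 input to 8x8, so stage 6
# returns a (N, 38, 8, 8) PAF tensor and a (N, 19, 8, 8) keypoint-heatmap
# tensor, matching the shapes asserted for buf170 and buf184 in the
# compiled graph above.
def _shape_check():
    model = bodypose_model().eval()
    with torch.no_grad():
        pafs, heatmaps = model(get_inputs()[0])
    assert pafs.shape == (4, 38, 8, 8)
    assert heatmaps.shape == (4, 19, 8, 8)
    return pafs, heatmaps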
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from collections import OrderedDict import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > 
tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 47360 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 % 185 x0 = xindex % 64 x2 = xindex // 11840 x3 = xindex tmp0 = x1 
tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 38, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 2432 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 57, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr2 + (x0 + 64 * (-38 + x1) + 1216 * x2), tmp13 & xmask, other=0.0) tmp15 = tl.load(in_ptr3 + (-38 + x1), tmp13 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tl.full([1], 185, tl.int64) tmp22 = tl.load(in_ptr4 + (x0 + 64 * (-57 + x1) + 8192 * x2), tmp19 & xmask, other=0.0) tmp23 = tl.where(tmp13, tmp18, tmp22) tmp24 = tl.where(tmp4, tmp9, tmp23) tl.store(out_ptr0 + x3, tmp24, xmask) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 38 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 64 % 19 x2 = xindex // 1216 x3 = xindex % 1216 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x3 + 1280 * x2), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, 
primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (256,), (1,)) assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_25, (128,), (1,)) assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_27, (128,), (1,)) assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_29, (128,), (1,)) assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_31, (128,), (1,)) assert_size_stride(primals_32, (512, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_33, (512,), (1,)) assert_size_stride(primals_34, (38, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_35, (38,), (1,)) assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_37, (128,), (1,)) assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_39, (128,), (1,)) assert_size_stride(primals_40, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_41, (128,), (1,)) assert_size_stride(primals_42, (512, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_43, (512,), (1,)) assert_size_stride(primals_44, (19, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_45, (19,), (1,)) assert_size_stride(primals_46, (128, 185, 7, 7), (9065, 49, 7, 1)) assert_size_stride(primals_47, (128,), (1,)) assert_size_stride(primals_48, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_49, (128,), (1,)) 
assert_size_stride(primals_50, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_51, (128,), (1,)) assert_size_stride(primals_52, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_53, (128,), (1,)) assert_size_stride(primals_54, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_55, (128,), (1,)) assert_size_stride(primals_56, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_57, (128,), (1,)) assert_size_stride(primals_58, (38, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_59, (38,), (1,)) assert_size_stride(primals_60, (128, 185, 7, 7), (9065, 49, 7, 1)) assert_size_stride(primals_61, (128,), (1,)) assert_size_stride(primals_62, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_63, (128,), (1,)) assert_size_stride(primals_64, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_65, (128,), (1,)) assert_size_stride(primals_66, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_67, (128,), (1,)) assert_size_stride(primals_68, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_69, (128,), (1,)) assert_size_stride(primals_70, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_71, (128,), (1,)) assert_size_stride(primals_72, (19, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_73, (19,), (1,)) assert_size_stride(primals_74, (128, 185, 7, 7), (9065, 49, 7, 1)) assert_size_stride(primals_75, (128,), (1,)) assert_size_stride(primals_76, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_77, (128,), (1,)) assert_size_stride(primals_78, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_79, (128,), (1,)) assert_size_stride(primals_80, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_81, (128,), (1,)) assert_size_stride(primals_82, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_83, (128,), (1,)) assert_size_stride(primals_84, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_85, (128,), (1,)) assert_size_stride(primals_86, (38, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_87, (38,), (1,)) assert_size_stride(primals_88, (128, 185, 7, 7), (9065, 49, 7, 1)) assert_size_stride(primals_89, (128,), (1,)) assert_size_stride(primals_90, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_91, (128,), (1,)) assert_size_stride(primals_92, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_93, (128,), (1,)) assert_size_stride(primals_94, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_95, (128,), (1,)) assert_size_stride(primals_96, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_97, (128,), (1,)) assert_size_stride(primals_98, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_99, (128,), (1,)) assert_size_stride(primals_100, (19, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_101, (19,), (1,)) assert_size_stride(primals_102, (128, 185, 7, 7), (9065, 49, 7, 1)) assert_size_stride(primals_103, (128,), (1,)) assert_size_stride(primals_104, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_105, (128,), (1,)) assert_size_stride(primals_106, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_107, (128,), (1,)) assert_size_stride(primals_108, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_109, (128,), (1,)) assert_size_stride(primals_110, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_111, (128,), (1,)) assert_size_stride(primals_112, (128, 128, 1, 1), (128, 1, 1, 1)) 
assert_size_stride(primals_113, (128,), (1,)) assert_size_stride(primals_114, (38, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_115, (38,), (1,)) assert_size_stride(primals_116, (128, 185, 7, 7), (9065, 49, 7, 1)) assert_size_stride(primals_117, (128,), (1,)) assert_size_stride(primals_118, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_119, (128,), (1,)) assert_size_stride(primals_120, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_121, (128,), (1,)) assert_size_stride(primals_122, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_123, (128,), (1,)) assert_size_stride(primals_124, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_125, (128,), (1,)) assert_size_stride(primals_126, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_127, (128,), (1,)) assert_size_stride(primals_128, (19, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_129, (19,), (1,)) assert_size_stride(primals_130, (128, 185, 7, 7), (9065, 49, 7, 1)) assert_size_stride(primals_131, (128,), (1,)) assert_size_stride(primals_132, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_133, (128,), (1,)) assert_size_stride(primals_134, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_135, (128,), (1,)) assert_size_stride(primals_136, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_137, (128,), (1,)) assert_size_stride(primals_138, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_139, (128,), (1,)) assert_size_stride(primals_140, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_141, (128,), (1,)) assert_size_stride(primals_142, (38, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_143, (38,), (1,)) assert_size_stride(primals_144, (128, 185, 7, 7), (9065, 49, 7, 1)) assert_size_stride(primals_145, (128,), (1,)) assert_size_stride(primals_146, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_147, (128,), (1,)) assert_size_stride(primals_148, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_149, (128,), (1,)) assert_size_stride(primals_150, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_151, (128,), (1,)) assert_size_stride(primals_152, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_153, (128,), (1,)) assert_size_stride(primals_154, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_155, (128,), (1,)) assert_size_stride(primals_156, (19, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_157, (19,), (1,)) assert_size_stride(primals_158, (128, 185, 7, 7), (9065, 49, 7, 1)) assert_size_stride(primals_159, (128,), (1,)) assert_size_stride(primals_160, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_161, (128,), (1,)) assert_size_stride(primals_162, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_163, (128,), (1,)) assert_size_stride(primals_164, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_165, (128,), (1,)) assert_size_stride(primals_166, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_167, (128,), (1,)) assert_size_stride(primals_168, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_169, (128,), (1,)) assert_size_stride(primals_170, (38, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_171, (38,), (1,)) assert_size_stride(primals_172, (128, 185, 7, 7), (9065, 49, 7, 1)) assert_size_stride(primals_173, (128,), (1,)) assert_size_stride(primals_174, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_175, (128,), (1,)) 
assert_size_stride(primals_176, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_177, (128,), (1,)) assert_size_stride(primals_178, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_179, (128,), (1,)) assert_size_stride(primals_180, (128, 128, 7, 7), (6272, 49, 7, 1)) assert_size_stride(primals_181, (128,), (1,)) assert_size_stride(primals_182, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_183, (128,), (1,)) assert_size_stride(primals_184, (19, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_185, (19,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4, buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9, buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 
1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_4[grid(262144)](buf17, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_4[grid(262144)](buf19, primals_17, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_5[grid(65536)](buf19, buf20, buf21, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_6[grid(131072)](buf23, primals_19, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_6[grid(131072)](buf25, primals_21, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_21 buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_7[grid(65536)](buf27, primals_23, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_23 buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_8[grid(32768)](buf29, primals_25, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_8[grid(32768)](buf31, primals_27, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_27 buf32 = extern_kernels.convolution(buf31, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1)) buf33 = buf32 del buf32 triton_poi_fused_convolution_relu_8[grid(32768)](buf33, primals_29, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_29 buf34 = extern_kernels.convolution(buf33, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 128, 8, 8), (8192, 64, 8, 1)) buf35 = buf34 del buf34 triton_poi_fused_convolution_relu_8[grid(32768)](buf35, 
primals_31, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_31 buf36 = extern_kernels.convolution(buf35, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 512, 8, 8), (32768, 64, 8, 1)) buf37 = buf36 del buf36 triton_poi_fused_convolution_relu_6[grid(131072)](buf37, primals_33, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_33 buf38 = extern_kernels.convolution(buf37, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 38, 8, 8), (2432, 64, 8, 1)) buf39 = extern_kernels.convolution(buf29, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 128, 8, 8), (8192, 64, 8, 1)) buf40 = buf39 del buf39 triton_poi_fused_convolution_relu_8[grid(32768)](buf40, primals_37, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_37 buf41 = extern_kernels.convolution(buf40, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf41, (4, 128, 8, 8), (8192, 64, 8, 1)) buf42 = buf41 del buf41 triton_poi_fused_convolution_relu_8[grid(32768)](buf42, primals_39, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_39 buf43 = extern_kernels.convolution(buf42, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1)) buf44 = buf43 del buf43 triton_poi_fused_convolution_relu_8[grid(32768)](buf44, primals_41, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_41 buf45 = extern_kernels.convolution(buf44, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 512, 8, 8), (32768, 64, 8, 1)) buf46 = buf45 del buf45 triton_poi_fused_convolution_relu_6[grid(131072)](buf46, primals_43, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_43 buf47 = extern_kernels.convolution(buf46, primals_44, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 19, 8, 8), (1216, 64, 8, 1)) buf48 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch .float32) triton_poi_fused_cat_9[grid(47360)](buf38, primals_35, buf47, primals_45, buf29, buf48, 47360, XBLOCK=512, num_warps=4, num_stages=1) del buf38 del buf47 del primals_35 del primals_45 buf49 = extern_kernels.convolution(buf48, primals_46, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf49, (4, 128, 8, 8), (8192, 64, 8, 1)) buf50 = buf49 del buf49 triton_poi_fused_convolution_relu_8[grid(32768)](buf50, primals_47, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_47 buf51 = extern_kernels.convolution(buf50, primals_48, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 128, 8, 8), (8192, 64, 8, 1)) buf52 = buf51 del buf51 triton_poi_fused_convolution_relu_8[grid(32768)](buf52, primals_49, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_49 buf53 = extern_kernels.convolution(buf52, primals_50, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 128, 8, 8), (8192, 64, 8, 1)) buf54 = buf53 del buf53 triton_poi_fused_convolution_relu_8[grid(32768)](buf54, primals_51, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_51 buf55 = extern_kernels.convolution(buf54, primals_52, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf55, (4, 128, 8, 8), (8192, 64, 8, 1)) buf56 = buf55 del buf55 triton_poi_fused_convolution_relu_8[grid(32768)](buf56, primals_53, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_53 buf57 = extern_kernels.convolution(buf56, primals_54, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf57, (4, 128, 8, 8), (8192, 64, 8, 1)) buf58 = buf57 del buf57 triton_poi_fused_convolution_relu_8[grid(32768)](buf58, primals_55, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_55 buf59 = extern_kernels.convolution(buf58, primals_56, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 128, 8, 8), (8192, 64, 8, 1)) buf60 = buf59 del buf59 triton_poi_fused_convolution_relu_8[grid(32768)](buf60, primals_57, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_57 buf61 = extern_kernels.convolution(buf60, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf61, (4, 38, 8, 8), (2432, 64, 8, 1)) buf62 = extern_kernels.convolution(buf48, primals_60, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf62, (4, 128, 8, 8), (8192, 64, 8, 1)) buf63 = buf62 del buf62 triton_poi_fused_convolution_relu_8[grid(32768)](buf63, primals_61, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_61 buf64 = extern_kernels.convolution(buf63, primals_62, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 128, 8, 8), (8192, 64, 8, 1)) buf65 = buf64 del buf64 triton_poi_fused_convolution_relu_8[grid(32768)](buf65, primals_63, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_63 buf66 = extern_kernels.convolution(buf65, primals_64, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 128, 8, 8), (8192, 64, 8, 1)) buf67 = buf66 del buf66 triton_poi_fused_convolution_relu_8[grid(32768)](buf67, primals_65, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_65 buf68 = extern_kernels.convolution(buf67, primals_66, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf68, (4, 128, 8, 8), (8192, 64, 8, 1)) buf69 = buf68 del buf68 triton_poi_fused_convolution_relu_8[grid(32768)](buf69, primals_67, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_67 buf70 = extern_kernels.convolution(buf69, primals_68, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf70, (4, 128, 8, 8), (8192, 64, 8, 1)) buf71 = buf70 del buf70 triton_poi_fused_convolution_relu_8[grid(32768)](buf71, primals_69, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_69 buf72 = extern_kernels.convolution(buf71, primals_70, 
stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf72, (4, 128, 8, 8), (8192, 64, 8, 1)) buf73 = buf72 del buf72 triton_poi_fused_convolution_relu_8[grid(32768)](buf73, primals_71, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_71 buf74 = extern_kernels.convolution(buf73, primals_72, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf74, (4, 19, 8, 8), (1216, 64, 8, 1)) buf75 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch .float32) triton_poi_fused_cat_9[grid(47360)](buf61, primals_59, buf74, primals_73, buf29, buf75, 47360, XBLOCK=512, num_warps=4, num_stages=1) del buf61 del buf74 del primals_59 del primals_73 buf76 = extern_kernels.convolution(buf75, primals_74, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf76, (4, 128, 8, 8), (8192, 64, 8, 1)) buf77 = buf76 del buf76 triton_poi_fused_convolution_relu_8[grid(32768)](buf77, primals_75, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_75 buf78 = extern_kernels.convolution(buf77, primals_76, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1)) buf79 = buf78 del buf78 triton_poi_fused_convolution_relu_8[grid(32768)](buf79, primals_77, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_77 buf80 = extern_kernels.convolution(buf79, primals_78, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf80, (4, 128, 8, 8), (8192, 64, 8, 1)) buf81 = buf80 del buf80 triton_poi_fused_convolution_relu_8[grid(32768)](buf81, primals_79, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_79 buf82 = extern_kernels.convolution(buf81, primals_80, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf82, (4, 128, 8, 8), (8192, 64, 8, 1)) buf83 = buf82 del buf82 triton_poi_fused_convolution_relu_8[grid(32768)](buf83, primals_81, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_81 buf84 = extern_kernels.convolution(buf83, primals_82, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf84, (4, 128, 8, 8), (8192, 64, 8, 1)) buf85 = buf84 del buf84 triton_poi_fused_convolution_relu_8[grid(32768)](buf85, primals_83, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_83 buf86 = extern_kernels.convolution(buf85, primals_84, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf86, (4, 128, 8, 8), (8192, 64, 8, 1)) buf87 = buf86 del buf86 triton_poi_fused_convolution_relu_8[grid(32768)](buf87, primals_85, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_85 buf88 = extern_kernels.convolution(buf87, primals_86, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf88, (4, 38, 8, 8), (2432, 64, 8, 1)) buf89 = extern_kernels.convolution(buf75, primals_88, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf89, (4, 128, 8, 8), (8192, 64, 8, 1)) buf90 = buf89 del buf89 
triton_poi_fused_convolution_relu_8[grid(32768)](buf90, primals_89, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_89 buf91 = extern_kernels.convolution(buf90, primals_90, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf91, (4, 128, 8, 8), (8192, 64, 8, 1)) buf92 = buf91 del buf91 triton_poi_fused_convolution_relu_8[grid(32768)](buf92, primals_91, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_91 buf93 = extern_kernels.convolution(buf92, primals_92, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf93, (4, 128, 8, 8), (8192, 64, 8, 1)) buf94 = buf93 del buf93 triton_poi_fused_convolution_relu_8[grid(32768)](buf94, primals_93, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_93 buf95 = extern_kernels.convolution(buf94, primals_94, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf95, (4, 128, 8, 8), (8192, 64, 8, 1)) buf96 = buf95 del buf95 triton_poi_fused_convolution_relu_8[grid(32768)](buf96, primals_95, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_95 buf97 = extern_kernels.convolution(buf96, primals_96, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf97, (4, 128, 8, 8), (8192, 64, 8, 1)) buf98 = buf97 del buf97 triton_poi_fused_convolution_relu_8[grid(32768)](buf98, primals_97, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_97 buf99 = extern_kernels.convolution(buf98, primals_98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf99, (4, 128, 8, 8), (8192, 64, 8, 1)) buf100 = buf99 del buf99 triton_poi_fused_convolution_relu_8[grid(32768)](buf100, primals_99, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_99 buf101 = extern_kernels.convolution(buf100, primals_100, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf101, (4, 19, 8, 8), (1216, 64, 8, 1)) buf102 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32) triton_poi_fused_cat_9[grid(47360)](buf88, primals_87, buf101, primals_101, buf29, buf102, 47360, XBLOCK=512, num_warps=4, num_stages=1) del buf101 del buf88 del primals_101 del primals_87 buf103 = extern_kernels.convolution(buf102, primals_102, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf103, (4, 128, 8, 8), (8192, 64, 8, 1)) buf104 = buf103 del buf103 triton_poi_fused_convolution_relu_8[grid(32768)](buf104, primals_103, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_103 buf105 = extern_kernels.convolution(buf104, primals_104, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf105, (4, 128, 8, 8), (8192, 64, 8, 1)) buf106 = buf105 del buf105 triton_poi_fused_convolution_relu_8[grid(32768)](buf106, primals_105, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_105 buf107 = extern_kernels.convolution(buf106, primals_106, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf107, (4, 128, 8, 8), (8192, 64, 8, 1)) buf108 = buf107 del buf107 
triton_poi_fused_convolution_relu_8[grid(32768)](buf108, primals_107, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_107 buf109 = extern_kernels.convolution(buf108, primals_108, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf109, (4, 128, 8, 8), (8192, 64, 8, 1)) buf110 = buf109 del buf109 triton_poi_fused_convolution_relu_8[grid(32768)](buf110, primals_109, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_109 buf111 = extern_kernels.convolution(buf110, primals_110, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf111, (4, 128, 8, 8), (8192, 64, 8, 1)) buf112 = buf111 del buf111 triton_poi_fused_convolution_relu_8[grid(32768)](buf112, primals_111, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_111 buf113 = extern_kernels.convolution(buf112, primals_112, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf113, (4, 128, 8, 8), (8192, 64, 8, 1)) buf114 = buf113 del buf113 triton_poi_fused_convolution_relu_8[grid(32768)](buf114, primals_113, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_113 buf115 = extern_kernels.convolution(buf114, primals_114, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf115, (4, 38, 8, 8), (2432, 64, 8, 1)) buf116 = extern_kernels.convolution(buf102, primals_116, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf116, (4, 128, 8, 8), (8192, 64, 8, 1)) buf117 = buf116 del buf116 triton_poi_fused_convolution_relu_8[grid(32768)](buf117, primals_117, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_117 buf118 = extern_kernels.convolution(buf117, primals_118, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf118, (4, 128, 8, 8), (8192, 64, 8, 1)) buf119 = buf118 del buf118 triton_poi_fused_convolution_relu_8[grid(32768)](buf119, primals_119, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_119 buf120 = extern_kernels.convolution(buf119, primals_120, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf120, (4, 128, 8, 8), (8192, 64, 8, 1)) buf121 = buf120 del buf120 triton_poi_fused_convolution_relu_8[grid(32768)](buf121, primals_121, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_121 buf122 = extern_kernels.convolution(buf121, primals_122, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf122, (4, 128, 8, 8), (8192, 64, 8, 1)) buf123 = buf122 del buf122 triton_poi_fused_convolution_relu_8[grid(32768)](buf123, primals_123, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_123 buf124 = extern_kernels.convolution(buf123, primals_124, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf124, (4, 128, 8, 8), (8192, 64, 8, 1)) buf125 = buf124 del buf124 triton_poi_fused_convolution_relu_8[grid(32768)](buf125, primals_125, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_125 buf126 = extern_kernels.convolution(buf125, primals_126, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf126, (4, 128, 8, 8), (8192, 64, 8, 1)) buf127 = buf126 del buf126 triton_poi_fused_convolution_relu_8[grid(32768)](buf127, primals_127, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_127 buf128 = extern_kernels.convolution(buf127, primals_128, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf128, (4, 19, 8, 8), (1216, 64, 8, 1)) buf129 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32) triton_poi_fused_cat_9[grid(47360)](buf115, primals_115, buf128, primals_129, buf29, buf129, 47360, XBLOCK=512, num_warps=4, num_stages=1) del buf115 del buf128 del primals_115 del primals_129 buf130 = extern_kernels.convolution(buf129, primals_130, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf130, (4, 128, 8, 8), (8192, 64, 8, 1)) buf131 = buf130 del buf130 triton_poi_fused_convolution_relu_8[grid(32768)](buf131, primals_131, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_131 buf132 = extern_kernels.convolution(buf131, primals_132, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf132, (4, 128, 8, 8), (8192, 64, 8, 1)) buf133 = buf132 del buf132 triton_poi_fused_convolution_relu_8[grid(32768)](buf133, primals_133, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_133 buf134 = extern_kernels.convolution(buf133, primals_134, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf134, (4, 128, 8, 8), (8192, 64, 8, 1)) buf135 = buf134 del buf134 triton_poi_fused_convolution_relu_8[grid(32768)](buf135, primals_135, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_135 buf136 = extern_kernels.convolution(buf135, primals_136, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf136, (4, 128, 8, 8), (8192, 64, 8, 1)) buf137 = buf136 del buf136 triton_poi_fused_convolution_relu_8[grid(32768)](buf137, primals_137, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_137 buf138 = extern_kernels.convolution(buf137, primals_138, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf138, (4, 128, 8, 8), (8192, 64, 8, 1)) buf139 = buf138 del buf138 triton_poi_fused_convolution_relu_8[grid(32768)](buf139, primals_139, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_139 buf140 = extern_kernels.convolution(buf139, primals_140, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf140, (4, 128, 8, 8), (8192, 64, 8, 1)) buf141 = buf140 del buf140 triton_poi_fused_convolution_relu_8[grid(32768)](buf141, primals_141, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_141 buf142 = extern_kernels.convolution(buf141, primals_142, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf142, (4, 38, 8, 8), (2432, 64, 8, 1)) buf143 = extern_kernels.convolution(buf129, primals_144, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf143, (4, 128, 8, 
8), (8192, 64, 8, 1)) buf144 = buf143 del buf143 triton_poi_fused_convolution_relu_8[grid(32768)](buf144, primals_145, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_145 buf145 = extern_kernels.convolution(buf144, primals_146, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf145, (4, 128, 8, 8), (8192, 64, 8, 1)) buf146 = buf145 del buf145 triton_poi_fused_convolution_relu_8[grid(32768)](buf146, primals_147, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_147 buf147 = extern_kernels.convolution(buf146, primals_148, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf147, (4, 128, 8, 8), (8192, 64, 8, 1)) buf148 = buf147 del buf147 triton_poi_fused_convolution_relu_8[grid(32768)](buf148, primals_149, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_149 buf149 = extern_kernels.convolution(buf148, primals_150, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf149, (4, 128, 8, 8), (8192, 64, 8, 1)) buf150 = buf149 del buf149 triton_poi_fused_convolution_relu_8[grid(32768)](buf150, primals_151, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_151 buf151 = extern_kernels.convolution(buf150, primals_152, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf151, (4, 128, 8, 8), (8192, 64, 8, 1)) buf152 = buf151 del buf151 triton_poi_fused_convolution_relu_8[grid(32768)](buf152, primals_153, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_153 buf153 = extern_kernels.convolution(buf152, primals_154, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf153, (4, 128, 8, 8), (8192, 64, 8, 1)) buf154 = buf153 del buf153 triton_poi_fused_convolution_relu_8[grid(32768)](buf154, primals_155, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_155 buf155 = extern_kernels.convolution(buf154, primals_156, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf155, (4, 19, 8, 8), (1216, 64, 8, 1)) buf156 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32) triton_poi_fused_cat_9[grid(47360)](buf142, primals_143, buf155, primals_157, buf29, buf156, 47360, XBLOCK=512, num_warps=4, num_stages=1) del buf142 del buf155 del primals_143 del primals_157 buf157 = extern_kernels.convolution(buf156, primals_158, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf157, (4, 128, 8, 8), (8192, 64, 8, 1)) buf158 = buf157 del buf157 triton_poi_fused_convolution_relu_8[grid(32768)](buf158, primals_159, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_159 buf159 = extern_kernels.convolution(buf158, primals_160, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf159, (4, 128, 8, 8), (8192, 64, 8, 1)) buf160 = buf159 del buf159 triton_poi_fused_convolution_relu_8[grid(32768)](buf160, primals_161, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_161 buf161 = extern_kernels.convolution(buf160, primals_162, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf161, (4, 128, 8, 8), (8192, 64, 8, 1)) buf162 = buf161 del buf161 triton_poi_fused_convolution_relu_8[grid(32768)](buf162, primals_163, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_163 buf163 = extern_kernels.convolution(buf162, primals_164, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf163, (4, 128, 8, 8), (8192, 64, 8, 1)) buf164 = buf163 del buf163 triton_poi_fused_convolution_relu_8[grid(32768)](buf164, primals_165, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_165 buf165 = extern_kernels.convolution(buf164, primals_166, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf165, (4, 128, 8, 8), (8192, 64, 8, 1)) buf166 = buf165 del buf165 triton_poi_fused_convolution_relu_8[grid(32768)](buf166, primals_167, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_167 buf167 = extern_kernels.convolution(buf166, primals_168, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf167, (4, 128, 8, 8), (8192, 64, 8, 1)) buf168 = buf167 del buf167 triton_poi_fused_convolution_relu_8[grid(32768)](buf168, primals_169, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_169 buf169 = extern_kernels.convolution(buf168, primals_170, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf169, (4, 38, 8, 8), (2432, 64, 8, 1)) buf170 = buf169 del buf169 triton_poi_fused_convolution_10[grid(9728)](buf170, primals_171, 9728, XBLOCK=256, num_warps=4, num_stages=1) del primals_171 buf171 = extern_kernels.convolution(buf156, primals_172, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf171, (4, 128, 8, 8), (8192, 64, 8, 1)) buf172 = buf171 del buf171 triton_poi_fused_convolution_relu_8[grid(32768)](buf172, primals_173, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_173 buf173 = extern_kernels.convolution(buf172, primals_174, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf173, (4, 128, 8, 8), (8192, 64, 8, 1)) buf174 = buf173 del buf173 triton_poi_fused_convolution_relu_8[grid(32768)](buf174, primals_175, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_175 buf175 = extern_kernels.convolution(buf174, primals_176, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf175, (4, 128, 8, 8), (8192, 64, 8, 1)) buf176 = buf175 del buf175 triton_poi_fused_convolution_relu_8[grid(32768)](buf176, primals_177, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_177 buf177 = extern_kernels.convolution(buf176, primals_178, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf177, (4, 128, 8, 8), (8192, 64, 8, 1)) buf178 = buf177 del buf177 triton_poi_fused_convolution_relu_8[grid(32768)](buf178, primals_179, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_179 buf179 = extern_kernels.convolution(buf178, primals_180, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf179, (4, 128, 8, 8), (8192, 64, 8, 1)) buf180 = buf179 
del buf179 triton_poi_fused_convolution_relu_8[grid(32768)](buf180, primals_181, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_181 buf181 = extern_kernels.convolution(buf180, primals_182, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf181, (4, 128, 8, 8), (8192, 64, 8, 1)) buf182 = buf181 del buf181 triton_poi_fused_convolution_relu_8[grid(32768)](buf182, primals_183, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_183 buf183 = extern_kernels.convolution(buf182, primals_184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf183, (4, 19, 8, 8), (1216, 64, 8, 1)) buf184 = buf183 del buf183 buf185 = empty_strided_cuda((4, 19, 8, 8), (1280, 64, 8, 1), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_11[grid(4864)]( buf184, primals_185, buf185, 4864, XBLOCK=256, num_warps=4, num_stages=1) del primals_185 return (buf170, buf184, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, primals_54, primals_56, primals_58, primals_60, primals_62, primals_64, primals_66, primals_68, primals_70, primals_72, primals_74, primals_76, primals_78, primals_80, primals_82, primals_84, primals_86, primals_88, primals_90, primals_92, primals_94, primals_96, primals_98, primals_100, primals_102, primals_104, primals_106, primals_108, primals_110, primals_112, primals_114, primals_116, primals_118, primals_120, primals_122, primals_124, primals_126, primals_128, primals_130, primals_132, primals_134, primals_136, primals_138, primals_140, primals_142, primals_144, primals_146, primals_148, primals_150, primals_152, primals_154, primals_156, primals_158, primals_160, primals_162, primals_164, primals_166, primals_168, primals_170, primals_172, primals_174, primals_176, primals_178, primals_180, primals_182, primals_184, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf63, buf65, buf67, buf69, buf71, buf73, buf75, buf77, buf79, buf81, buf83, buf85, buf87, buf90, buf92, buf94, buf96, buf98, buf100, buf102, buf104, buf106, buf108, buf110, buf112, buf114, buf117, buf119, buf121, buf123, buf125, buf127, buf129, buf131, buf133, buf135, buf137, buf139, buf141, buf144, buf146, buf148, buf150, buf152, buf154, buf156, buf158, buf160, buf162, buf164, buf166, buf168, buf172, buf174, buf176, buf178, buf180, buf182, buf185) def make_layers(block, no_relu_layers): layers = [] for layer_name, v in block.items(): if 'pool' in layer_name: layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2]) layers.append((layer_name, layer)) else: conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4]) layers.append((layer_name, conv2d)) if layer_name not in no_relu_layers: layers.append(('relu_' + layer_name, nn.ReLU(inplace=True))) return nn.Sequential(OrderedDict(layers)) class bodypose_modelNew(nn.Module): def __init__(self): super(bodypose_modelNew, self).__init__() no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1', 
'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2', 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1'] blocks = {} block0 = OrderedDict({'conv1_1': [3, 64, 3, 1, 1], 'conv1_2': [64, 64, 3, 1, 1], 'pool1_stage1': [2, 2, 0], 'conv2_1': [64, 128, 3, 1, 1], 'conv2_2': [128, 128, 3, 1, 1], 'pool2_stage1': [2, 2, 0 ], 'conv3_1': [128, 256, 3, 1, 1], 'conv3_2': [256, 256, 3, 1, 1], 'conv3_3': [256, 256, 3, 1, 1], 'conv3_4': [256, 256, 3, 1, 1], 'pool3_stage1': [2, 2, 0], 'conv4_1': [256, 512, 3, 1, 1], 'conv4_2': [512, 512, 3, 1, 1], 'conv4_3_CPM': [512, 256, 3, 1, 1], 'conv4_4_CPM': [256, 128, 3, 1, 1]}) block1_1 = OrderedDict({'conv5_1_CPM_L1': [128, 128, 3, 1, 1], 'conv5_2_CPM_L1': [128, 128, 3, 1, 1], 'conv5_3_CPM_L1': [128, 128, 3, 1, 1], 'conv5_4_CPM_L1': [128, 512, 1, 1, 0], 'conv5_5_CPM_L1': [512, 38, 1, 1, 0]}) block1_2 = OrderedDict({'conv5_1_CPM_L2': [128, 128, 3, 1, 1], 'conv5_2_CPM_L2': [128, 128, 3, 1, 1], 'conv5_3_CPM_L2': [128, 128, 3, 1, 1], 'conv5_4_CPM_L2': [128, 512, 1, 1, 0], 'conv5_5_CPM_L2': [512, 19, 1, 1, 0]}) blocks['block1_1'] = block1_1 blocks['block1_2'] = block1_2 self.model0 = make_layers(block0, no_relu_layers) for i in range(2, 7): blocks['block%d_1' % i] = OrderedDict({('Mconv1_stage%d_L1' % i ): [185, 128, 7, 1, 3], ('Mconv2_stage%d_L1' % i): [128, 128, 7, 1, 3], ('Mconv3_stage%d_L1' % i): [128, 128, 7, 1, 3], ('Mconv4_stage%d_L1' % i): [128, 128, 7, 1, 3], ( 'Mconv5_stage%d_L1' % i): [128, 128, 7, 1, 3], ( 'Mconv6_stage%d_L1' % i): [128, 128, 1, 1, 0], ( 'Mconv7_stage%d_L1' % i): [128, 38, 1, 1, 0]}) blocks['block%d_2' % i] = OrderedDict({('Mconv1_stage%d_L2' % i ): [185, 128, 7, 1, 3], ('Mconv2_stage%d_L2' % i): [128, 128, 7, 1, 3], ('Mconv3_stage%d_L2' % i): [128, 128, 7, 1, 3], ('Mconv4_stage%d_L2' % i): [128, 128, 7, 1, 3], ( 'Mconv5_stage%d_L2' % i): [128, 128, 7, 1, 3], ( 'Mconv6_stage%d_L2' % i): [128, 128, 1, 1, 0], ( 'Mconv7_stage%d_L2' % i): [128, 19, 1, 1, 0]}) for k in blocks.keys(): blocks[k] = make_layers(blocks[k], no_relu_layers) self.model1_1 = blocks['block1_1'] self.model2_1 = blocks['block2_1'] self.model3_1 = blocks['block3_1'] self.model4_1 = blocks['block4_1'] self.model5_1 = blocks['block5_1'] self.model6_1 = blocks['block6_1'] self.model1_2 = blocks['block1_2'] self.model2_2 = blocks['block2_2'] self.model3_2 = blocks['block3_2'] self.model4_2 = blocks['block4_2'] self.model5_2 = blocks['block5_2'] self.model6_2 = blocks['block6_2'] def forward(self, input_0): primals_1 = self.model0.conv1_1.weight primals_2 = self.model0.conv1_1.bias primals_4 = self.model0.conv1_2.weight primals_5 = self.model0.conv1_2.bias primals_6 = self.model0.conv2_1.weight primals_7 = self.model0.conv2_1.bias primals_8 = self.model0.conv2_2.weight primals_9 = self.model0.conv2_2.bias primals_10 = self.model0.conv3_1.weight primals_11 = self.model0.conv3_1.bias primals_12 = self.model0.conv3_2.weight primals_13 = self.model0.conv3_2.bias primals_14 = self.model0.conv3_3.weight primals_15 = self.model0.conv3_3.bias primals_16 = self.model0.conv3_4.weight primals_17 = self.model0.conv3_4.bias primals_18 = self.model0.conv4_1.weight primals_19 = self.model0.conv4_1.bias primals_20 = self.model0.conv4_2.weight primals_21 = self.model0.conv4_2.bias primals_22 = self.model0.conv4_3_CPM.weight primals_23 = self.model0.conv4_3_CPM.bias primals_24 = self.model0.conv4_4_CPM.weight primals_25 = self.model0.conv4_4_CPM.bias primals_26 = self.model1_1.conv5_1_CPM_L1.weight primals_27 = 
self.model1_1.conv5_1_CPM_L1.bias
        primals_28 = self.model1_1.conv5_2_CPM_L1.weight
        primals_29 = self.model1_1.conv5_2_CPM_L1.bias
        primals_30 = self.model1_1.conv5_3_CPM_L1.weight
        primals_31 = self.model1_1.conv5_3_CPM_L1.bias
        primals_32 = self.model1_1.conv5_4_CPM_L1.weight
        primals_33 = self.model1_1.conv5_4_CPM_L1.bias
        primals_34 = self.model1_1.conv5_5_CPM_L1.weight
        primals_35 = self.model1_1.conv5_5_CPM_L1.bias
        primals_36 = self.model1_2.conv5_1_CPM_L2.weight
        primals_37 = self.model1_2.conv5_1_CPM_L2.bias
        primals_38 = self.model1_2.conv5_2_CPM_L2.weight
        primals_39 = self.model1_2.conv5_2_CPM_L2.bias
        primals_40 = self.model1_2.conv5_3_CPM_L2.weight
        primals_41 = self.model1_2.conv5_3_CPM_L2.bias
        primals_42 = self.model1_2.conv5_4_CPM_L2.weight
        primals_43 = self.model1_2.conv5_4_CPM_L2.bias
        primals_44 = self.model1_2.conv5_5_CPM_L2.weight
        primals_45 = self.model1_2.conv5_5_CPM_L2.bias
        primals_46 = self.model2_1.Mconv1_stage2_L1.weight
        primals_47 = self.model2_1.Mconv1_stage2_L1.bias
        primals_48 = self.model2_1.Mconv2_stage2_L1.weight
        primals_49 = self.model2_1.Mconv2_stage2_L1.bias
        primals_50 = self.model2_1.Mconv3_stage2_L1.weight
        primals_51 = self.model2_1.Mconv3_stage2_L1.bias
        primals_52 = self.model2_1.Mconv4_stage2_L1.weight
        primals_53 = self.model2_1.Mconv4_stage2_L1.bias
        primals_54 = self.model2_1.Mconv5_stage2_L1.weight
        primals_55 = self.model2_1.Mconv5_stage2_L1.bias
        primals_56 = self.model2_1.Mconv6_stage2_L1.weight
        primals_57 = self.model2_1.Mconv6_stage2_L1.bias
        primals_58 = self.model2_1.Mconv7_stage2_L1.weight
        primals_59 = self.model2_1.Mconv7_stage2_L1.bias
        primals_60 = self.model2_2.Mconv1_stage2_L2.weight
        primals_61 = self.model2_2.Mconv1_stage2_L2.bias
        primals_62 = self.model2_2.Mconv2_stage2_L2.weight
        primals_63 = self.model2_2.Mconv2_stage2_L2.bias
        primals_64 = self.model2_2.Mconv3_stage2_L2.weight
        primals_65 = self.model2_2.Mconv3_stage2_L2.bias
        primals_66 = self.model2_2.Mconv4_stage2_L2.weight
        primals_67 = self.model2_2.Mconv4_stage2_L2.bias
        primals_68 = self.model2_2.Mconv5_stage2_L2.weight
        primals_69 = self.model2_2.Mconv5_stage2_L2.bias
        primals_70 = self.model2_2.Mconv6_stage2_L2.weight
        primals_71 = self.model2_2.Mconv6_stage2_L2.bias
        primals_72 = self.model2_2.Mconv7_stage2_L2.weight
        primals_73 = self.model2_2.Mconv7_stage2_L2.bias
        primals_74 = self.model3_1.Mconv1_stage3_L1.weight
        primals_75 = self.model3_1.Mconv1_stage3_L1.bias
        primals_76 = self.model3_1.Mconv2_stage3_L1.weight
        primals_77 = self.model3_1.Mconv2_stage3_L1.bias
        primals_78 = self.model3_1.Mconv3_stage3_L1.weight
        primals_79 = self.model3_1.Mconv3_stage3_L1.bias
        primals_80 = self.model3_1.Mconv4_stage3_L1.weight
        primals_81 = self.model3_1.Mconv4_stage3_L1.bias
        primals_82 = self.model3_1.Mconv5_stage3_L1.weight
        primals_83 = self.model3_1.Mconv5_stage3_L1.bias
        primals_84 = self.model3_1.Mconv6_stage3_L1.weight
        primals_85 = self.model3_1.Mconv6_stage3_L1.bias
        primals_86 = self.model3_1.Mconv7_stage3_L1.weight
        primals_87 = self.model3_1.Mconv7_stage3_L1.bias
        primals_88 = self.model3_2.Mconv1_stage3_L2.weight
        primals_89 = self.model3_2.Mconv1_stage3_L2.bias
        primals_90 = self.model3_2.Mconv2_stage3_L2.weight
        primals_91 = self.model3_2.Mconv2_stage3_L2.bias
        primals_92 = self.model3_2.Mconv3_stage3_L2.weight
        primals_93 = self.model3_2.Mconv3_stage3_L2.bias
        primals_94 = self.model3_2.Mconv4_stage3_L2.weight
        primals_95 = self.model3_2.Mconv4_stage3_L2.bias
        primals_96 = self.model3_2.Mconv5_stage3_L2.weight
        primals_97 = self.model3_2.Mconv5_stage3_L2.bias
        primals_98 = self.model3_2.Mconv6_stage3_L2.weight
        primals_99 = self.model3_2.Mconv6_stage3_L2.bias
        primals_100 = self.model3_2.Mconv7_stage3_L2.weight
        primals_101 = self.model3_2.Mconv7_stage3_L2.bias
        primals_102 = self.model4_1.Mconv1_stage4_L1.weight
        primals_103 = self.model4_1.Mconv1_stage4_L1.bias
        primals_104 = self.model4_1.Mconv2_stage4_L1.weight
        primals_105 = self.model4_1.Mconv2_stage4_L1.bias
        primals_106 = self.model4_1.Mconv3_stage4_L1.weight
        primals_107 = self.model4_1.Mconv3_stage4_L1.bias
        primals_108 = self.model4_1.Mconv4_stage4_L1.weight
        primals_109 = self.model4_1.Mconv4_stage4_L1.bias
        primals_110 = self.model4_1.Mconv5_stage4_L1.weight
        primals_111 = self.model4_1.Mconv5_stage4_L1.bias
        primals_112 = self.model4_1.Mconv6_stage4_L1.weight
        primals_113 = self.model4_1.Mconv6_stage4_L1.bias
        primals_114 = self.model4_1.Mconv7_stage4_L1.weight
        primals_115 = self.model4_1.Mconv7_stage4_L1.bias
        primals_116 = self.model4_2.Mconv1_stage4_L2.weight
        primals_117 = self.model4_2.Mconv1_stage4_L2.bias
        primals_118 = self.model4_2.Mconv2_stage4_L2.weight
        primals_119 = self.model4_2.Mconv2_stage4_L2.bias
        primals_120 = self.model4_2.Mconv3_stage4_L2.weight
        primals_121 = self.model4_2.Mconv3_stage4_L2.bias
        primals_122 = self.model4_2.Mconv4_stage4_L2.weight
        primals_123 = self.model4_2.Mconv4_stage4_L2.bias
        primals_124 = self.model4_2.Mconv5_stage4_L2.weight
        primals_125 = self.model4_2.Mconv5_stage4_L2.bias
        primals_126 = self.model4_2.Mconv6_stage4_L2.weight
        primals_127 = self.model4_2.Mconv6_stage4_L2.bias
        primals_128 = self.model4_2.Mconv7_stage4_L2.weight
        primals_129 = self.model4_2.Mconv7_stage4_L2.bias
        primals_130 = self.model5_1.Mconv1_stage5_L1.weight
        primals_131 = self.model5_1.Mconv1_stage5_L1.bias
        primals_132 = self.model5_1.Mconv2_stage5_L1.weight
        primals_133 = self.model5_1.Mconv2_stage5_L1.bias
        primals_134 = self.model5_1.Mconv3_stage5_L1.weight
        primals_135 = self.model5_1.Mconv3_stage5_L1.bias
        primals_136 = self.model5_1.Mconv4_stage5_L1.weight
        primals_137 = self.model5_1.Mconv4_stage5_L1.bias
        primals_138 = self.model5_1.Mconv5_stage5_L1.weight
        primals_139 = self.model5_1.Mconv5_stage5_L1.bias
        primals_140 = self.model5_1.Mconv6_stage5_L1.weight
        primals_141 = self.model5_1.Mconv6_stage5_L1.bias
        primals_142 = self.model5_1.Mconv7_stage5_L1.weight
        primals_143 = self.model5_1.Mconv7_stage5_L1.bias
        primals_144 = self.model5_2.Mconv1_stage5_L2.weight
        primals_145 = self.model5_2.Mconv1_stage5_L2.bias
        primals_146 = self.model5_2.Mconv2_stage5_L2.weight
        primals_147 = self.model5_2.Mconv2_stage5_L2.bias
        primals_148 = self.model5_2.Mconv3_stage5_L2.weight
        primals_149 = self.model5_2.Mconv3_stage5_L2.bias
        primals_150 = self.model5_2.Mconv4_stage5_L2.weight
        primals_151 = self.model5_2.Mconv4_stage5_L2.bias
        primals_152 = self.model5_2.Mconv5_stage5_L2.weight
        primals_153 = self.model5_2.Mconv5_stage5_L2.bias
        primals_154 = self.model5_2.Mconv6_stage5_L2.weight
        primals_155 = self.model5_2.Mconv6_stage5_L2.bias
        primals_156 = self.model5_2.Mconv7_stage5_L2.weight
        primals_157 = self.model5_2.Mconv7_stage5_L2.bias
        primals_158 = self.model6_1.Mconv1_stage6_L1.weight
        primals_159 = self.model6_1.Mconv1_stage6_L1.bias
        primals_160 = self.model6_1.Mconv2_stage6_L1.weight
        primals_161 = self.model6_1.Mconv2_stage6_L1.bias
        primals_162 = self.model6_1.Mconv3_stage6_L1.weight
        primals_163 = self.model6_1.Mconv3_stage6_L1.bias
        primals_164 = self.model6_1.Mconv4_stage6_L1.weight
        primals_165 = self.model6_1.Mconv4_stage6_L1.bias
        primals_166 = self.model6_1.Mconv5_stage6_L1.weight
        primals_167 = self.model6_1.Mconv5_stage6_L1.bias
        primals_168 = self.model6_1.Mconv6_stage6_L1.weight
        primals_169 = self.model6_1.Mconv6_stage6_L1.bias
        primals_170 = self.model6_1.Mconv7_stage6_L1.weight
        primals_171 = self.model6_1.Mconv7_stage6_L1.bias
        primals_172 = self.model6_2.Mconv1_stage6_L2.weight
        primals_173 = self.model6_2.Mconv1_stage6_L2.bias
        primals_174 = self.model6_2.Mconv2_stage6_L2.weight
        primals_175 = self.model6_2.Mconv2_stage6_L2.bias
        primals_176 = self.model6_2.Mconv3_stage6_L2.weight
        primals_177 = self.model6_2.Mconv3_stage6_L2.bias
        primals_178 = self.model6_2.Mconv4_stage6_L2.weight
        primals_179 = self.model6_2.Mconv4_stage6_L2.bias
        primals_180 = self.model6_2.Mconv5_stage6_L2.weight
        primals_181 = self.model6_2.Mconv5_stage6_L2.bias
        primals_182 = self.model6_2.Mconv6_stage6_L2.weight
        primals_183 = self.model6_2.Mconv6_stage6_L2.bias
        primals_184 = self.model6_2.Mconv7_stage6_L2.weight
        primals_185 = self.model6_2.Mconv7_stage6_L2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34,
            primals_35, primals_36, primals_37, primals_38, primals_39,
            primals_40, primals_41, primals_42, primals_43, primals_44,
            primals_45, primals_46, primals_47, primals_48, primals_49,
            primals_50, primals_51, primals_52, primals_53, primals_54,
            primals_55, primals_56, primals_57, primals_58, primals_59,
            primals_60, primals_61, primals_62, primals_63, primals_64,
            primals_65, primals_66, primals_67, primals_68, primals_69,
            primals_70, primals_71, primals_72, primals_73, primals_74,
            primals_75, primals_76, primals_77, primals_78, primals_79,
            primals_80, primals_81, primals_82, primals_83, primals_84,
            primals_85, primals_86, primals_87, primals_88, primals_89,
            primals_90, primals_91, primals_92, primals_93, primals_94,
            primals_95, primals_96, primals_97, primals_98, primals_99,
            primals_100, primals_101, primals_102, primals_103,
            primals_104, primals_105, primals_106, primals_107,
            primals_108, primals_109, primals_110, primals_111,
            primals_112, primals_113, primals_114, primals_115,
            primals_116, primals_117, primals_118, primals_119,
            primals_120, primals_121, primals_122, primals_123,
            primals_124, primals_125, primals_126, primals_127,
            primals_128, primals_129, primals_130, primals_131,
            primals_132, primals_133, primals_134, primals_135,
            primals_136, primals_137, primals_138, primals_139,
            primals_140, primals_141, primals_142, primals_143,
            primals_144, primals_145, primals_146, primals_147,
            primals_148, primals_149, primals_150, primals_151,
            primals_152, primals_153, primals_154, primals_155,
            primals_156, primals_157, primals_158, primals_159,
            primals_160, primals_161, primals_162, primals_163,
            primals_164, primals_165, primals_166, primals_167,
            primals_168, primals_169, primals_170, primals_171,
            primals_172, primals_173, primals_174, primals_175,
            primals_176, primals_177, primals_178, primals_179,
            primals_180, primals_181, primals_182, primals_183,
            primals_184, primals_185])
        return output[0], output[1]
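A quick end-to-end check of the compiled module above; an illustrative sketch, not part of the original file. It assumes a CUDA device and the (4, 3, 64, 64) input the kernels were specialized for: conv1_1 takes 3 channels, and the assert_size_stride guards pin the intermediate maps to 16x16 and then 8x8, i.e. a 64x64 input downsampled by three 2x2 poolings.

import torch

def smoke_test_bodypose():
    # bodypose_modelNew is the class defined above; weights keep the default
    # nn.Conv2d initialization, so this only validates shapes and execution.
    model = bodypose_modelNew().cuda()
    x = torch.randn(4, 3, 64, 64, device='cuda')
    with torch.no_grad():
        paf, heatmap = model(x)
    # The stage-6 L1 head emits 38 part-affinity channels and the L2 head
    # 19 heatmap channels, both at 1/8 input resolution (buf170 / buf184).
    assert paf.shape == (4, 38, 8, 8)
    assert heatmap.shape == (4, 19, 8, 8)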
Schwartz-Zha/My_Pose_Estimation
bodypose_model
false
12060
[ "MIT" ]
0
0ccaccf58498b2200842c155b735e1103c28c5ba
https://github.com/Schwartz-Zha/My_Pose_Estimation/tree/0ccaccf58498b2200842c155b735e1103c28c5ba
Scale
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/s3/cs3xfcsbv3q363t3gue76e5b2o6wfhbslxcdj5vsrheb24anhw4c.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.parallel class Scale(nn.Module): def __init__(self, init_value=1.0): super(Scale, self).__init__() self.scale = nn.Parameter(torch.FloatTensor([init_value])) def forward(self, input): return input * self.scale def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
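Scale multiplies its input elementwise by a single learnable scalar; a minimal eager-mode usage sketch (illustrative, not part of the dataset row):

import torch

s = Scale(init_value=2.0)
x = torch.ones(3)
y = s(x)  # the 1-element parameter broadcasts across x
print(y)              # tensor([2., 2., 2.], grad_fn=<MulBackward0>)
print(s.scale.shape)  # torch.Size([1]): one trainable parameter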
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf0, primals_2 class ScaleNew(nn.Module): def __init__(self, init_value=1.0): super(ScaleNew, self).__init__() self.scale = nn.Parameter(torch.FloatTensor([init_value])) def forward(self, input_0): primals_1 = self.scale primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
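ScaleNew routes the same multiplication through the compiled Triton kernel, so it can be sanity-checked against the eager Scale. A sketch assuming a CUDA device and that both classes from this row are defined in one session; note the kernel is specialized for a contiguous (4, 4, 4, 4) float32 input (xnumel = 256 and the assert_size_stride guard above), so other shapes would fail.

import torch

eager = Scale(init_value=0.5).cuda()
compiled = ScaleNew(init_value=0.5).cuda()
# Same init_value, so the parameters already agree; outputs should match.
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x))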
XDong18/AdelaiDet
Scale
false
12061
[ "BSD-2-Clause" ]
0
837cd1078923892fe6e84ac29fd0963f1b2c474f
https://github.com/XDong18/AdelaiDet/tree/837cd1078923892fe6e84ac29fd0963f1b2c474f
GCN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/7v/c7vse4ttxzdbru7dfocfs3ww7nwsyfx6sr45vxlp52c3tp32neoh.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 0, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 6 x2 = (xindex // 24) x3 = xindex % 24 x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 
4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-4) + x3 + (16*x2)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zd/czdnvxomk4eszij32s3lplwqtm76wnpfvm4wiwhrw2rhdjp2ogia.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.constant_pad_nd] # Source node to ATen node mapping: # x_1 => convolution # x_2 => constant_pad_nd_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution, [1, 1, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_convolution_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x4 = (xindex // 6) x2 = (xindex // 24) % 4 x5 = xindex tmp0 = (-1) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-1) + x0 + (4*x4)), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + (x5), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/44/c44cszqvpf2gpsv3a7r2ty4gybhcwcagja4qfcv6v2z7birhmw3a.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x_4 => constant_pad_nd_2 # Graph fragment: # %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 
1, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_2 = async_compile.triton('triton_poi_fused_constant_pad_nd_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) x2 = xindex tmp0 = (-1) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ad/cadvxkdcfg3ctfq2bzps3n3fnxzyo3lapisq5w45vitb3ptttep2.py # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.constant_pad_nd] # Source node to ATen node mapping: # x_5 => convolution_2 # x_6 => constant_pad_nd_3 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %constant_pad_nd_3 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution_2, [0, 0, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_convolution_3 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 6 x4 = (xindex // 24) x5 = xindex % 24 x2 = (xindex // 24) % 4 x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-4) + x5 + (16*x4)), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + (x6), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3v/c3vkksfv6yaxxvqjtbkbreldbynuman3tm7flauw4zt5m6vl3rdw.py # Topologically Sorted Source Nodes: [x_3, x_7, out], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # out => add # x_3 => convolution_1 # x_7 => convolution_3 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_3, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %convolution_3), kwargs = {}) triton_poi_fused_add_convolution_4 = async_compile.triton('triton_poi_fused_add_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 384, grid=grid(384), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_convolution_1.run(buf1, primals_3, buf2, 384, grid=grid(384), stream=stream0) del buf1 del primals_3 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_2.run(primals_1, buf4, 384, grid=grid(384), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, 
aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_convolution_3.run(buf5, primals_7, buf6, 384, grid=grid(384), stream=stream0) del buf5 del primals_7 # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_3, x_7, out], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_4.run(buf8, primals_5, buf7, primals_9, 256, grid=grid(256), stream=stream0) del buf7 del primals_5 del primals_9 return (buf8, primals_2, primals_4, primals_6, primals_8, buf0, buf2, buf4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 1), (12, 3, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 3, 1), (12, 3, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
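Note on the constant_pad_nd ops in the graph above: they are the lowered form of the F.pad call in Conv2D.forward (the module source follows below). aten.constant_pad_nd reads its pad list from the last dimension inward, so [1, 1, 0, 0] pads width ahead of a (1, k) conv and [0, 0, 1, 1] pads height ahead of a (k, 1) conv. A minimal shape check, assuming the 4x4 test input; the results match buf0/buf2/buf4/buf6 in call():

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
# Height padded by 1 on each side, feeding a (3, 1) kernel, as in buf0 and buf6.
assert F.pad(x, (0, 0, 1, 1)).shape == (4, 4, 6, 4)
# Width padded by 1 on each side, feeding a (1, 3) kernel, as in buf2 and buf4.
assert F.pad(x, (1, 1, 0, 0)).shape == (4, 4, 4, 6)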
import torch from torch import nn import torch.nn.functional as F import torch.nn.parallel class Conv2D(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, padding= 'same', stride=1, dilation=1, groups=1): super(Conv2D, self).__init__() assert type(kernel_size) in [int, tuple ], 'Allowed kernel type [int or tuple], not {}'.format(type( kernel_size)) assert padding == 'same', 'Allowed padding type {}, not {}'.format( 'same', padding) self.kernel_size = kernel_size if isinstance(kernel_size, tuple): self.h_kernel = kernel_size[0] self.w_kernel = kernel_size[1] else: self.h_kernel = kernel_size self.w_kernel = kernel_size self.padding = padding self.stride = stride self.dilation = dilation self.groups = groups self.conv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=self.stride, dilation=self.dilation, groups=self.groups) def forward(self, x): if self.padding == 'same': height, width = x.shape[2:] h_pad_need = max(0, (height - 1) * self.stride + self.h_kernel - height) w_pad_need = max(0, (width - 1) * self.stride + self.w_kernel - width) pad_left = w_pad_need // 2 pad_right = w_pad_need - pad_left pad_top = h_pad_need // 2 pad_bottom = h_pad_need - pad_top padding = pad_left, pad_right, pad_top, pad_bottom x = F.pad(x, padding, 'constant', 0) x = self.conv(x) return x class GCN(nn.Module): """ Large Kernel Matters -- https://arxiv.org/abs/1703.02719 """ def __init__(self, in_channels, out_channels, k=3): super(GCN, self).__init__() self.conv_l1 = Conv2D(in_channels=in_channels, out_channels= out_channels, kernel_size=(k, 1), padding='same') self.conv_l2 = Conv2D(in_channels=out_channels, out_channels= out_channels, kernel_size=(1, k), padding='same') self.conv_r1 = Conv2D(in_channels=in_channels, out_channels= out_channels, kernel_size=(1, k), padding='same') self.conv_r2 = Conv2D(in_channels=out_channels, out_channels= out_channels, kernel_size=(k, 1), padding='same') def forward(self, x): x1 = self.conv_l1(x) x1 = self.conv_l2(x1) x2 = self.conv_r1(x) x2 = self.conv_r2(x2) out = x1 + x2 return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
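A minimal usage sketch for the GCN module above: with stride 1 the 'same' padding preserves H and W, and forward() is exactly the sum of the (k,1)->(1,k) and (1,k)->(k,1) branches, so recomputing the branches by hand reproduces the output.

import torch

gcn = GCN(in_channels=4, out_channels=4, k=3)
x = get_inputs()[0]

out = gcn(x)
assert out.shape == x.shape  # 'same' padding keeps the spatial size at stride 1

# The two separable large-kernel branches, summed as in forward().
left = gcn.conv_l2(gcn.conv_l1(x))
right = gcn.conv_r2(gcn.conv_r1(x))
assert torch.allclose(out, left + right)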
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn.functional as F import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x2 = xindex // 24 x3 = xindex % 24 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x4 = xindex // 6 x2 = xindex // 24 % 4 x5 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x2, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + x5, tmp10, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x4 = xindex // 24 x5 = xindex % 24 x2 = xindex // 24 % 4 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-4 + x5 + 16 * x4), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x2, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + x6, tmp10, xmask) @triton.jit def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x3, tmp6, 
xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(384)](primals_1, buf0, 384, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_1[grid(384)](buf1, primals_3, buf2, 384, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) triton_poi_fused_constant_pad_nd_2[grid(384)](primals_1, buf4, 384, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_3[grid(384)](buf5, primals_7, buf6, 384, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del primals_7 buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = buf3 del buf3 triton_poi_fused_add_convolution_4[grid(256)](buf8, primals_5, buf7, primals_9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 del primals_5 del primals_9 return (buf8, primals_2, primals_4, primals_6, primals_8, buf0, buf2, buf4, buf6) class Conv2D(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, padding= 'same', stride=1, dilation=1, groups=1): super(Conv2D, self).__init__() assert type(kernel_size) in [int, tuple ], 'Allowed kernel type [int or tuple], not {}'.format(type( kernel_size)) assert padding == 'same', 'Allowed padding type {}, not {}'.format( 'same', padding) self.kernel_size = kernel_size if isinstance(kernel_size, tuple): self.h_kernel = kernel_size[0] self.w_kernel = kernel_size[1] else: self.h_kernel = kernel_size self.w_kernel = kernel_size self.padding = padding self.stride = stride self.dilation = dilation self.groups = groups self.conv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=self.stride, dilation=self.dilation, groups=self.groups) def forward(self, x): if self.padding == 'same': height, width = x.shape[2:] h_pad_need = max(0, (height - 1) * self.stride + 
self.h_kernel - height) w_pad_need = max(0, (width - 1) * self.stride + self.w_kernel - width) pad_left = w_pad_need // 2 pad_right = w_pad_need - pad_left pad_top = h_pad_need // 2 pad_bottom = h_pad_need - pad_top padding = pad_left, pad_right, pad_top, pad_bottom x = F.pad(x, padding, 'constant', 0) x = self.conv(x) return x class GCNNew(nn.Module): """ Large Kernel Matters -- https://arxiv.org/abs/1703.02719 """ def __init__(self, in_channels, out_channels, k=3): super(GCNNew, self).__init__() self.conv_l1 = Conv2D(in_channels=in_channels, out_channels= out_channels, kernel_size=(k, 1), padding='same') self.conv_l2 = Conv2D(in_channels=out_channels, out_channels= out_channels, kernel_size=(1, k), padding='same') self.conv_r1 = Conv2D(in_channels=in_channels, out_channels= out_channels, kernel_size=(1, k), padding='same') self.conv_r2 = Conv2D(in_channels=out_channels, out_channels= out_channels, kernel_size=(k, 1), padding='same') def forward(self, input_0): primals_2 = self.conv_l1.conv.weight primals_3 = self.conv_l1.conv.bias primals_4 = self.conv_l2.conv.weight primals_5 = self.conv_l2.conv.bias primals_6 = self.conv_r1.conv.weight primals_7 = self.conv_r1.conv.bias primals_8 = self.conv_r2.conv.weight primals_9 = self.conv_r2.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
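A sketch of an equivalence check between the eager GCN and the compiled GCNNew wrapper above. It assumes a CUDA device, since call() pins device 0 and launches Triton kernels; the parameter names match, so the state dict transfers directly.

import torch

torch.manual_seed(0)
eager = GCN(in_channels=4, out_channels=4).cuda()
fused = GCNNew(in_channels=4, out_channels=4).cuda()
fused.load_state_dict(eager.state_dict())  # conv_l1/l2/r1/r2 weights and biases

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(fused(x), eager(x), rtol=1e-5, atol=1e-5)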
XDong18/AdelaiDet
GCN
false
12,062
[ "BSD-2-Clause" ]
0
837cd1078923892fe6e84ac29fd0963f1b2c474f
https://github.com/XDong18/AdelaiDet/tree/837cd1078923892fe6e84ac29fd0963f1b2c474f
DPLSTMCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/cm/ccmo7fnacrzz2bpb3kjhjwznydxc2ydumd6dycmnumtbvfyp23ld.py # Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward] # Source node to ATen node mapping: # c_t => add_1 # f_t => sigmoid_1 # g_t => tanh # h_t => mul_2 # i_t => sigmoid # mul => mul # mul_1 => mul_1 # o_t => sigmoid_2 # tanh_1 => tanh_1 # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {}) # %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_2,), kwargs = {}) # %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_7), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub_3), kwargs = {}) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask) tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask) tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr4 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp16 + tmp17 tmp21 = tmp19 + tmp20 tmp22 = tmp18 + tmp21 tmp23 = libdevice.tanh(tmp22) tmp26 = tmp24 + tmp25 tmp29 = tmp27 + tmp28 tmp30 = tmp26 + tmp29 tmp31 = tl.sigmoid(tmp30) tmp33 = tmp31 * tmp32 tmp34 = tmp7 * tmp23 tmp35 = tmp33 + tmp34 tmp36 = 1.0 tmp37 = tmp36 - tmp31 tmp38 = tmp31 * tmp37 tmp39 = libdevice.tanh(tmp35) tmp40 = tmp15 * tmp39 tl.store(out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr1 + (x2), tmp15, xmask) tl.store(out_ptr2 + (x2), tmp23, xmask) tl.store(out_ptr3 + (x2), tmp35, xmask) tl.store(out_ptr4 + (x2), tmp38, xmask) tl.store(out_ptr5 + (x2), tmp40, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() 
assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_6, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0.run(buf0, primals_2, buf1, primals_5, primals_7, buf2, buf4, buf3, buf5, buf7, buf6, 16, grid=grid(16), stream=stream0) del buf0 del buf1 del primals_2 del primals_5 return (buf6, buf5, primals_3, primals_6, primals_7, buf2, buf3, buf4, buf5, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
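Gate layout assumed by the fused kernel above: with hidden_size=4 the stacked ih/hh projections have 16 columns, split in the order (i, f, g, o), so the kernel's loads at offsets x0, 4 + x0, 8 + x0 and 12 + x0 pick out the input, forget, cell and output gates (sigmoid for i/f/o, tanh for g), matching the torch.split in the eager cell below. A small check of that split order:

import torch

gates = torch.arange(16).reshape(1, 16)    # one row of ih(x) + hh(h_prev)
i, f, g, o = torch.split(gates, 4, dim=1)  # hidden_size == 4
assert [t[0, 0].item() for t in (i, f, g, o)] == [0, 4, 8, 12]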
import math import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel from typing import Tuple class LSTMLinear(nn.Linear): """ This function is the same as an nn.Linear layer, except that in the backward pass the grad_samples get accumulated (instead of being concatenated as in the standard nn.Linear) """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'bool'=True): super().__init__(in_features, out_features, bias) class DPLSTMCell(nn.Module): """ Internal-only class. Implements *one* step of LSTM so that an LSTM layer can be seen as repeated applications of this class. """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias) self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias) self.reset_parameters() def reset_parameters(self): """ Resets parameters by initializing them from a uniform distribution. """ stdv = 1.0 / math.sqrt(self.hidden_size) for weight in self.parameters(): nn.init.uniform_(weight, -stdv, stdv) def forward(self, x: 'torch.Tensor', h_prev: 'torch.Tensor', c_prev: 'torch.Tensor') -> Tuple[torch.Tensor, torch.Tensor]: gates = self.ih(x) + self.hh(h_prev) i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates, self.hidden_size, 1) i_t = torch.sigmoid(i_t_input) f_t = torch.sigmoid(f_t_input) g_t = torch.tanh(g_t_input) o_t = torch.sigmoid(o_t_input) c_t = f_t * c_prev + i_t * g_t h_t = o_t * torch.tanh(c_t) return h_t, c_t def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'bias': 4}]
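Per the docstring above, a full LSTM layer is just this cell applied repeatedly; a minimal unrolling sketch (sequence length and zero initial state are illustrative):

import torch

cell = DPLSTMCell(input_size=4, hidden_size=4, bias=True)
xs = torch.rand(3, 4, 4)  # (time, batch, input_size)
h = torch.zeros(4, 4)
c = torch.zeros(4, 4)
for x_t in xs:  # repeated application of the one-step cell
    h, c = cell(x_t, h, c)
print(h.shape, c.shape)  # torch.Size([4, 4]) torch.Size([4, 4])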
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask) tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask) tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp16 + tmp17 tmp21 = tmp19 + tmp20 tmp22 = tmp18 + tmp21 tmp23 = libdevice.tanh(tmp22) tmp26 = tmp24 + tmp25 tmp29 = tmp27 + tmp28 tmp30 = tmp26 + tmp29 tmp31 = tl.sigmoid(tmp30) tmp33 = tmp31 * tmp32 tmp34 = tmp7 * tmp23 tmp35 = tmp33 + tmp34 tmp36 = 1.0 tmp37 = tmp36 - tmp31 tmp38 = tmp31 * tmp37 tmp39 = libdevice.tanh(tmp35) tmp40 = tmp15 * tmp39 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) tl.store(out_ptr2 + x2, tmp23, xmask) tl.store(out_ptr3 + x2, tmp35, xmask) tl.store(out_ptr4 + x2, tmp38, xmask) tl.store(out_ptr5 + x2, tmp40, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(primals_6, reinterpret_tensor(primals_4, (4, 16), (1, 4), 
0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0[grid(16)](buf0 , primals_2, buf1, primals_5, primals_7, buf2, buf4, buf3, buf5, buf7, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del buf1 del primals_2 del primals_5 return (buf6, buf5, primals_3, primals_6, primals_7, buf2, buf3, buf4, buf5, buf7) class LSTMLinear(nn.Linear): """ This function is the same as an nn.Linear layer, except that in the backward pass the grad_samples get accumulated (instead of being concatenated as in the standard nn.Linear) """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'bool'=True): super().__init__(in_features, out_features, bias) class DPLSTMCellNew(nn.Module): """ Internal-only class. Implements *one* step of LSTM so that an LSTM layer can be seen as repeated applications of this class. """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias) self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias) self.reset_parameters() def reset_parameters(self): """ Resets parameters by initializing them from a uniform distribution. """ stdv = 1.0 / math.sqrt(self.hidden_size) for weight in self.parameters(): nn.init.uniform_(weight, -stdv, stdv) def forward(self, input_0, input_1, input_2): primals_1 = self.ih.weight primals_2 = self.ih.bias primals_4 = self.hh.weight primals_5 = self.hh.bias primals_3 = input_0 primals_6 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
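A sketch comparing one step of the compiled DPLSTMCellNew against the eager DPLSTMCell above. It assumes a CUDA device, since call() launches the fused Triton kernel; both cells return (h_t, c_t) and share parameter names.

import torch

torch.manual_seed(0)
eager = DPLSTMCell(input_size=4, hidden_size=4, bias=True).cuda()
fused = DPLSTMCellNew(input_size=4, hidden_size=4, bias=True).cuda()
fused.load_state_dict(eager.state_dict())  # ih/hh weights and biases

x, h0, c0 = torch.rand(3, 4, 4, device='cuda')  # unpacks three (4, 4) tensors
with torch.no_grad():
    h_ref, c_ref = eager(x, h0, c0)
    h_out, c_out = fused(x, h0, c0)
torch.testing.assert_close(h_out, h_ref)
torch.testing.assert_close(c_out, c_ref)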
adriansarstedt/opacus
DPLSTMCell
false
12,063
[ "Apache-2.0" ]
0
a6c89e3d6a3a4e3e4b82bc8c68d53265a9a7cba1
https://github.com/adriansarstedt/opacus/tree/a6c89e3d6a3a4e3e4b82bc8c68d53265a9a7cba1
SimpleAtariNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/sq/csqfb77zjt6xwjmyjavpb7rji5psvcrif566vzhwkbeg4ldzq7sz.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 8], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1285952 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 20093) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/sm/csmngsvhiejfop563rjcwtsvjfkholzraodabis7aaid42vdaosf.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 141312 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1104) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ul/culsqsviria7nlcg3gowcpfoa4d2qmf4nqkhvncoi2rqa5d3q3s5.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = 
call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 59136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 231) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rt/crtn2gwgctkt7j6gy42pkx4kuzg26mnoqck743kjorhzyzdxuqza.py # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_3 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 39680 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = (xindex // 155) % 64 x2 = (xindex // 9920) x3 = xindex % 9920 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x3 + (9984*x2)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xf/cxfzio3tfszzmzyf72tvvf66o7fqar5r7xhksxu57vny2wnrupwz.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_4 => relu_4 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 15872 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (16, 3, 12, 12), (432, 144, 12, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 3, 576, 576), (995328, 331776, 576, 1)) assert_size_stride(primals_4, (32, 16, 8, 8), (1024, 64, 8, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (512, 1280), (1280, 1)) assert_size_stride(primals_11, (512, ), (1, )) assert_size_stride(primals_12, (2, 512), (512, 1)) assert_size_stride(primals_13, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 8), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 283, 71), (321488, 20093, 71, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1285952, grid=grid(1285952), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 69, 16), (35328, 1104, 16, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 141312, grid=grid(141312), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 33, 7), (14784, 231, 7, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 59136, grid=grid(59136), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 31, 5), (9920, 155, 5, 1)) buf7 = buf6; del buf6 # reuse buf11 = empty_strided_cuda((4, 64, 31, 5), (9984, 
155, 5, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_3.run(buf7, primals_9, buf11, 39680, grid=grid(39680), stream=stream0) del primals_9 buf8 = empty_strided_cuda((31, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (31, 1280), (1280, 1), 0), reinterpret_tensor(primals_10, (1280, 512), (1, 1280), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_11, 15872, grid=grid(15872), stream=stream0) del primals_11 buf10 = empty_strided_cuda((31, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf9, reinterpret_tensor(primals_12, (512, 2), (1, 512), 0), alpha=1, beta=1, out=buf10) del primals_13 return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, reinterpret_tensor(buf7, (31, 1280), (1280, 1), 0), buf9, primals_12, primals_10, buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 3, 12, 12), (432, 144, 12, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 576, 576), (995328, 331776, 576, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 16, 8, 8), (1024, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((512, 1280), (1280, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((2, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
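On the conv+ReLU fusion above: extern_kernels.convolution is always invoked with bias=None, and the per-channel bias add plus ReLU are applied in place by the pointwise triton_poi_fused_convolution_relu_* kernels. An eager-mode rendering of the same split, with an illustrative input size:

import torch
import torch.nn.functional as F

conv = torch.nn.Conv2d(3, 16, 12, stride=(2, 8))
x = torch.rand(1, 3, 64, 64)
y = F.conv2d(x, conv.weight, bias=None, stride=(2, 8))  # extern convolution, no bias
y = F.relu(y + conv.bias.view(1, -1, 1, 1))             # fused bias-add + ReLU kernel
torch.testing.assert_close(y, F.relu(conv(x)))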
import torch
import torch.nn as nn
import torch.nn.functional as functional


class SimpleAtariNet(nn.Module):

    def __init__(self):
        super(SimpleAtariNet, self).__init__()
        self.conv0 = nn.Conv2d(3, 16, 12, stride=(2, 8))
        self.conv1 = nn.Conv2d(16, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, 3, stride=1)
        self.lin1 = nn.Linear(1280, 512)
        self.lin2 = nn.Linear(512, 2)

    def forward(self, x):
        x = functional.relu(self.conv0(x))
        x = functional.relu(self.conv1(x))
        x = functional.relu(self.conv2(x))
        x = functional.relu(self.conv3(x))
        # Note: for the 4x3x576x576 input below, conv3 produces 64*31*5 =
        # 9920 features per sample, which 1280 does not divide evenly, so
        # this view merges features across batch entries and yields 31 rows
        # rather than 4; the compiled graph below reproduces this as-is.
        x = functional.relu(self.lin1(x.view(-1, 1280)))
        x = self.lin2(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 576, 576])]


def get_init_inputs():
    return [[], {}]
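A quick shape trace makes the 1280 and the flatten note above concrete. This is a minimal sketch, assuming only torch and the layer specs copied from SimpleAtariNet:

import torch
import torch.nn as nn

convs = nn.Sequential(
    nn.Conv2d(3, 16, 12, stride=(2, 8)),
    nn.Conv2d(16, 32, 8, stride=4),
    nn.Conv2d(32, 64, 4, stride=2),
    nn.Conv2d(64, 64, 3, stride=1),
)
x = torch.rand(4, 3, 576, 576)
for conv in convs:
    x = conv(x)
    print(tuple(x.shape))
# (4, 16, 283, 71) -> (4, 32, 69, 16) -> (4, 64, 33, 7) -> (4, 64, 31, 5),
# matching the assert_size_stride checks in the compiled wrapper.
print(x.view(-1, 1280).shape)  # torch.Size([31, 1280]), not (4, 1280)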
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1285952 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 20093 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1104 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 59136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 231 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 39680 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 155 % 64 x2 = xindex // 9920 x3 = xindex % 9920 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x3 + 9984 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 15872 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (16, 3, 12, 12), (432, 144, 12, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 3, 576, 576), (995328, 331776, 576, 1)) assert_size_stride(primals_4, (32, 16, 8, 8), (1024, 64, 
8, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (512, 1280), (1280, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (2, 512), (512, 1)) assert_size_stride(primals_13, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 8), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 283, 71), (321488, 20093, 71, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1285952)](buf1, primals_2, 1285952, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 69, 16), (35328, 1104, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(141312)](buf3, primals_5, 141312, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 33, 7), (14784, 231, 7, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(59136)](buf5, primals_7, 59136, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 31, 5), (9920, 155, 5, 1)) buf7 = buf6 del buf6 buf11 = empty_strided_cuda((4, 64, 31, 5), (9984, 155, 5, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(39680)]( buf7, primals_9, buf11, 39680, XBLOCK=512, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((31, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (31, 1280), (1280, 1), 0 ), reinterpret_tensor(primals_10, (1280, 512), (1, 1280), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(15872)](buf9, primals_11, 15872, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((31, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_13, buf9, reinterpret_tensor( primals_12, (512, 2), (1, 512), 0), alpha=1, beta=1, out=buf10) del primals_13 return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, reinterpret_tensor(buf7, (31, 1280), (1280, 1), 0 ), buf9, primals_12, primals_10, buf11) class SimpleAtariNetNew(nn.Module): def __init__(self): super(SimpleAtariNetNew, self).__init__() self.conv0 = nn.Conv2d(3, 16, 12, stride=(2, 8)) self.conv1 = nn.Conv2d(16, 32, 8, stride=4) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 64, 3, stride=1) self.lin1 = nn.Linear(1280, 512) self.lin2 = nn.Linear(512, 2) def forward(self, input_0): primals_1 = self.conv0.weight primals_2 = self.conv0.bias primals_4 = self.conv1.weight primals_5 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_8 = self.conv3.weight primals_9 = self.conv3.bias primals_10 = self.lin1.weight primals_11 = self.lin1.bias primals_12 = self.lin2.weight primals_13 = 
self.lin2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
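The generated SimpleAtariNetNew should match the eager module up to floating-point tolerance. A hedged parity check (a sketch, not part of the generated file; it assumes a CUDA device and both SimpleAtariNet and SimpleAtariNetNew in scope):

import torch

ref = SimpleAtariNet().cuda()
new = SimpleAtariNetNew().cuda()
new.load_state_dict(ref.state_dict())  # same weights for both paths
x = torch.rand(4, 3, 576, 576, device='cuda')
with torch.no_grad():
    out_ref, out_new = ref(x), new(x)
print(out_ref.shape)  # torch.Size([31, 2]) -- see the flatten note above
torch.testing.assert_close(out_ref, out_new, rtol=1e-4, atol=1e-4)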
aaronmckinstry706/pytorch-practice
SimpleAtariNet
false
12,064
[ "MIT" ]
0
d3fd28733ea6de6a2e522ec52ff3e748df21b85a
https://github.com/aaronmckinstry706/pytorch-practice/tree/d3fd28733ea6de6a2e522ec52ff3e748df21b85a
Conv_ReLU_Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/sm/csmn2c5h6ncxnqle756u5rlgewfhiybu5xd5jyz7yap5pkjabpas.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex 
tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, None) tl.store(out_ptr0 + (x0), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_2, (4, 64, 64, 64), (262144, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, buf2, 1048576, grid=grid(1048576), stream=stream0) return (buf1, primals_1, primals_2, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Conv_ReLU_Block(nn.Module): def __init__(self): super(Conv_ReLU_Block, self).__init__() self.conv = nn.Conv2d(in_channels=64, out_channels=64, kernel_size= 3, stride=1, padding=1, bias=False) self.relu = nn.ReLU(inplace=True) def forward(self, x): return self.relu(self.conv(x)) def get_inputs(): return [torch.rand([4, 64, 64, 64])] def get_init_inputs(): return [[], {}]
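A small usage sketch, assuming only the class above: with kernel_size=3, stride=1, padding=1 the block preserves spatial size, which is what lets VDSR stack many of these blocks back to back.

import torch

block = Conv_ReLU_Block()
x = torch.rand(4, 64, 64, 64)
y = block(x)
assert y.shape == x.shape  # 3x3 conv with padding=1, stride=1 keeps HxW
assert (y >= 0).all()      # in-place ReLU leaves no negative activations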
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, None) tl.store(out_ptr0 + x0, tmp4, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_2, (4, 64, 64, 64), (262144, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1048576)](buf1, buf2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf2 class Conv_ReLU_BlockNew(nn.Module): def __init__(self): super(Conv_ReLU_BlockNew, self).__init__() self.conv = nn.Conv2d(in_channels=64, out_channels=64, kernel_size= 3, stride=1, padding=1, bias=False) self.relu = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
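The generated call() can also be driven directly. A sketch under stated assumptions (CUDA device, call in scope): since the conv has bias=False the argument list is just [weight, input], and the boolean buffer in the return tuple is the relu <= 0 mask saved for threshold_backward.

import torch

weight = torch.randn(64, 64, 3, 3, device='cuda')
x = torch.rand(4, 64, 64, 64, device='cuda')
out, w_saved, x_saved, relu_mask = call([weight, x])
print(out.shape, relu_mask.dtype)  # torch.Size([4, 64, 64, 64]) torch.bool
assert bool((relu_mask == (out <= 0)).all())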
advaza/pytorch-vdsr
Conv_ReLU_Block
false
12,065
[ "MIT" ]
0
8011f7323de3c7756df3828612addfb122c2bfef
https://github.com/advaza/pytorch-vdsr/tree/8011f7323de3c7756df3828612addfb122c2bfef
Gate
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/6v/c6v5wbm5lsunafktut4znlxsnjnnqyz6khmykdxvnjhemkarthul.py # Topologically Sorted Source Nodes: [gate, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # gate => sigmoid # mul => mul # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_2,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 
tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_proj], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [gate, mul], Original ATen: [aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) return (buf1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Gate(nn.Module):
    """Gate Unit
    g = sigmoid(Wx)
    x = g * x
    """

    def __init__(self, input_size):
        super(Gate, self).__init__()
        self.linear = nn.Linear(input_size, input_size, bias=False)

    def forward(self, x):
        """
        Args:
            x: batch * len * dim
        Output:
            res: batch * len * dim
        """
        x_proj = self.linear(x)
        # Note: the sigmoid is applied to the raw input x, not to x_proj,
        # so this computes Wx * sigmoid(x) rather than the docstring's
        # sigmoid(Wx) * x; the compiled kernel below reproduces this exactly.
        gate = torch.sigmoid(x)
        return x_proj * gate


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
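The note in forward above is easy to verify. A sketch, assuming only the Gate class as defined (fresh random weights, so the docstring variant and the implemented variant disagree almost surely):

import torch

gate = Gate(input_size=4)
x = torch.rand(4, 4, 4, 4)
x_proj = gate.linear(x)
implemented = gate(x)                     # Wx * sigmoid(x)
docstring = torch.sigmoid(x_proj) * x     # sigmoid(Wx) * x
assert torch.allclose(implemented, x_proj * torch.sigmoid(x))
print(torch.allclose(implemented, docstring))  # False in general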
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_2 class GateNew(nn.Module): """Gate Unit g = sigmoid(Wx) x = g * x """ def __init__(self, input_size): super(GateNew, self).__init__() self.linear = nn.Linear(input_size, input_size, bias=False) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
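A hedged parity check for the compiled gate (a sketch; it assumes a CUDA device and both Gate and GateNew in scope, since the Triton kernel only runs on GPU tensors):

import torch

ref = Gate(4).cuda()
new = GateNew(4).cuda()
new.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(ref(x), new(x))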
albert-dot-ai/MnemonicReader
Gate
false
12,066
[ "BSD-3-Clause" ]
0
eb51eb679a58677a405953c0c579568377c0b0f8
https://github.com/albert-dot-ai/MnemonicReader/tree/eb51eb679a58677a405953c0c579568377c0b0f8
ScoreLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = 
tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch.nn import functional as F
import torch.nn as nn


class ScoreLayer(nn.Module):

    def __init__(self, k):
        super(ScoreLayer, self).__init__()
        self.score = nn.Conv2d(k, 1, 1, 1)

    def forward(self, x, x_size=None):
        x = self.score(x)
        if x_size is not None:
            x = F.interpolate(x, x_size[2:], mode='bilinear',
                align_corners=True)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'k': 4}]
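Usage sketch, assuming only the class above: the 1x1 convolution collapses k channels into a single score map, and passing a reference size triggers the bilinear upsample.

import torch

layer = ScoreLayer(k=4)
x = torch.rand(4, 4, 4, 4)
print(layer(x).shape)              # torch.Size([4, 1, 4, 4])
ref = torch.rand(4, 3, 16, 16)     # hypothetical higher-resolution reference
print(layer(x, ref.size()).shape)  # torch.Size([4, 1, 16, 16])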
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class ScoreLayerNew(nn.Module): def __init__(self, k): super(ScoreLayerNew, self).__init__() self.score = nn.Conv2d(k, 1, 1, 1) def forward(self, input_0): primals_1 = self.score.weight primals_2 = self.score.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
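Note that ScoreLayerNew only captures the x_size=None path; the interpolation branch of the eager forward is absent from the compiled graph. A hedged parity check on the path that is covered (a sketch; assumes CUDA and both classes in scope):

import torch

ref = ScoreLayer(4).cuda()
new = ScoreLayerNew(4).cuda()
new.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(ref(x), new(x))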
alejodosr/PoolNet
ScoreLayer
false
12,067
[ "MIT" ]
0
a6a19379933fe02c22f0eb0dd92038fe87cf0bd3
https://github.com/alejodosr/PoolNet/tree/a6a19379933fe02c22f0eb0dd92038fe87cf0bd3
NaiveGroupNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/5r/c5rdz5pqjqqsgx62pgljudybkgqw7637rjp23qdmziwn4mfhay4j.py # Topologically Sorted Source Nodes: [mean, pow_1, mean_1, pow_2, var, add, std, mul, input_4], Original ATen: [aten.mean, aten.pow, aten.sub, aten.add, aten.sqrt, aten.mul] # Source node to ATen node mapping: # add => add # input_4 => add_1 # mean => mean # mean_1 => mean_1 # mul => mul # pow_1 => pow_1 # pow_2 => pow_2 # std => sqrt # var => sub # Graph fragment: # %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1], True), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mean, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean_1, %pow_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-05), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_3), kwargs = {}) triton_per_fused_add_mean_mul_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_pow_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 64.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp0 - tmp11 tmp19 = tmp18 / tmp17 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp17, xmask) tl.store(out_ptr0 + (r1 + (64*x0)), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0); del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, pow_1, mean_1, pow_2, var, add, std, mul, input_4], Original ATen: [aten.mean, aten.pow, aten.sub, aten.add, aten.sqrt, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_mean_mul_pow_sqrt_sub_0.run(buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, grid=grid(4), stream=stream0) del primals_2 del primals_3 return (buf4, primals_1, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = 
rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch from torch.nn import Parameter from torch.nn import init import torch.nn.parallel class NaiveGroupNorm(Module): """NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch. It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX. The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`. Args: num_groups (int): number of groups to separate the channels into num_channels (int): number of channels expected in input eps: a value added to the denominator for numerical stability. Default: 1e-5 affine: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> # Separate 6 channels into 3 groups >>> m = NaiveGroupNorm(3, 6) >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = NaiveGroupNorm(6, 6) >>> # Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = NaiveGroupNorm(1, 6) >>> # Activating the module >>> output = m(input) .. _`Group Normalization`: https://arxiv.org/abs/1803.08494 """ __constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight', 'bias'] def __init__(self, num_groups, num_channels, eps=1e-05, affine=True): super(NaiveGroupNorm, self).__init__() self.num_groups = num_groups self.num_channels = num_channels self.eps = eps self.affine = affine if self.affine: self.weight = Parameter(torch.Tensor(num_channels)) self.bias = Parameter(torch.Tensor(num_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def forward(self, input): N, C, H, W = input.size() assert C % self.num_groups == 0 input = input.reshape(N, self.num_groups, -1) mean = input.mean(dim=-1, keepdim=True) var = (input ** 2).mean(dim=-1, keepdim=True) - mean ** 2 std = torch.sqrt(var + self.eps) input = (input - mean) / std input = input.reshape(N, C, H, W) if self.affine: input = input * self.weight.reshape(1, C, 1, 1 ) + self.bias.reshape(1, C, 1, 1) return input def extra_repr(self): return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'. format(**self.__dict__)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_groups': 1, 'num_channels': 4}]
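Since NaiveGroupNorm computes the biased variance as E[x^2] - E[x]^2, it should agree with the official module up to floating-point reordering. A minimal cross-check sketch, assuming only the class above (both modules initialize weight to ones and bias to zeros):

import torch
import torch.nn as nn

naive = NaiveGroupNorm(num_groups=1, num_channels=4)
official = nn.GroupNorm(num_groups=1, num_channels=4)
x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(naive(x), official(x), rtol=1e-4, atol=1e-5)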
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch.nn import Parameter from torch.nn import init import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 64.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp0 - tmp11 tmp19 = tmp18 / tmp17 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp17, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp23, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_mean_mul_pow_sqrt_sub_0[grid(4)](buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 return buf4, primals_1, buf1, buf3 class NaiveGroupNormNew(Module): """NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch. It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX. The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`. Args: num_groups (int): number of groups to separate the channels into num_channels (int): number of channels expected in input eps: a value added to the denominator for numerical stability. Default: 1e-5 affine: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. 
Shape: - Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> # Separate 6 channels into 3 groups >>> m = NaiveGroupNorm(3, 6) >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = NaiveGroupNorm(6, 6) >>> # Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = NaiveGroupNorm(1, 6) >>> # Activating the module >>> output = m(input) .. _`Group Normalization`: https://arxiv.org/abs/1803.08494 """ __constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight', 'bias'] def __init__(self, num_groups, num_channels, eps=1e-05, affine=True): super(NaiveGroupNormNew, self).__init__() self.num_groups = num_groups self.num_channels = num_channels self.eps = eps self.affine = affine if self.affine: self.weight = Parameter(torch.Tensor(num_channels)) self.bias = Parameter(torch.Tensor(num_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def extra_repr(self): return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'. format(**self.__dict__)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
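A hedged parity check for the fused kernel (a sketch; assumes CUDA and both classes in scope). The kernel above hard-codes the traced shape, so only the 4x4x4x4, num_groups=1 configuration is exercised; no state copy is needed because both modules reset to weight=1, bias=0.

import torch

ref = NaiveGroupNorm(1, 4).cuda()
new = NaiveGroupNormNew(1, 4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(ref(x), new(x), rtol=1e-4, atol=1e-5)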
XDong18/AdelaiDet
NaiveGroupNorm
false
12,068
[ "BSD-2-Clause" ]
0
837cd1078923892fe6e84ac29fd0963f1b2c474f
https://github.com/XDong18/AdelaiDet/tree/837cd1078923892fe6e84ac29fd0963f1b2c474f
SFU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3l/c3lo77c7wjxasxrhtr6wesb72ods2d2rxnxhbfieun7j2wukm3wn.py # Topologically Sorted Source Nodes: [r_f], Original ATen: [aten.cat] # Source node to ATen node mapping: # r_f => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/4h/c4hvd3vhapir7uardh64cxnc57lcta2i2xqwszrnyxam4nh7uanb.py # Topologically Sorted Source Nodes: [r, g, mul, sub, mul_1, o], Original ATen: [aten.tanh, aten.sigmoid, aten.mul, aten.rsub, aten.add] # Source node to ATen node mapping: # g => sigmoid # mul => mul # mul_1 => mul_1 # o => add # r => tanh # sub => sub # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp7 = tl.load(in_ptr2 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = libdevice.tanh(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, 
primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [r_f], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 128, grid=grid(128), stream=stream0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [r, g, mul, sub, mul_1, o], Original ATen: [aten.tanh, aten.sigmoid, aten.mul, aten.rsub, aten.add] triton_poi_fused_add_mul_rsub_sigmoid_tanh_1.run(buf2, buf1, primals_1, buf3, 64, grid=grid(64), stream=stream0) return (buf3, primals_1, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class SFU(nn.Module):
    """Semantic Fusion Unit
    The output vector is expected to not only retrieve correlative
    information from the fusion vectors, but also to remain partly
    unchanged from the input vector.
    """

    def __init__(self, input_size, fusion_size):
        super(SFU, self).__init__()
        self.linear_r = nn.Linear(input_size + fusion_size, input_size)
        self.linear_g = nn.Linear(input_size + fusion_size, input_size)

    def forward(self, x, fusions):
        r_f = torch.cat([x, fusions], 2)
        r = torch.tanh(self.linear_r(r_f))
        g = torch.sigmoid(self.linear_g(r_f))
        o = g * r + (1 - g) * x
        return o


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'fusion_size': 4}]
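Because g is a sigmoid, the output is an elementwise convex combination of the fused candidate r and the original x. A small sketch, assuming only the SFU class above (the 1e-6 slack absorbs floating-point rounding):

import torch

sfu = SFU(input_size=4, fusion_size=4)
x = torch.rand(4, 4, 4)
fusions = torch.rand(4, 4, 4)
o = sfu(x, fusions)
r = torch.tanh(sfu.linear_r(torch.cat([x, fusions], 2)))
lo = torch.minimum(r, x) - 1e-6
hi = torch.maximum(r, x) + 1e-6
assert bool(((o >= lo) & (o <= hi)).all())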
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp7 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = libdevice.tanh(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(64)](buf2, buf1, primals_1, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf3, primals_1, reinterpret_tensor(buf0, (16, 8), (8, 1), 0 ), buf1, buf2 class SFUNew(nn.Module): """Semantic Fusion Unit The output vector is expected to not only retrieve correlative information from fusion vectors, but also to partly retain the input vector unchanged """ def __init__(self, input_size, fusion_size): super(SFUNew, self).__init__() self.linear_r = nn.Linear(input_size + fusion_size, input_size) self.linear_g = nn.Linear(input_size + fusion_size, input_size) def forward(self, input_0,
input_1): primals_3 = self.linear_r.weight primals_4 = self.linear_r.bias primals_5 = self.linear_g.weight primals_6 = self.linear_g.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
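As a sanity check, the compiled module should be numerically interchangeable with the eager one once the weights are shared. A hedged sketch, assuming a CUDA device (call() launches Triton kernels) and that both classes above are in scope; the names eager/compiled are illustrative.

import torch

eager = SFU(4, 4).cuda()
compiled = SFUNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameters by name
x = torch.rand(4, 4, 4, device='cuda')
f = torch.rand(4, 4, 4, device='cuda')
# The fused cat/addmm/sigmoid-tanh pipeline should match eager PyTorch
# up to float32 rounding.
assert torch.allclose(eager(x, f), compiled(x, f), atol=1e-5)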
albert-dot-ai/MnemonicReader
SFU
false
12,069
[ "BSD-3-Clause" ]
0
eb51eb679a58677a405953c0c579568377c0b0f8
https://github.com/albert-dot-ai/MnemonicReader/tree/eb51eb679a58677a405953c0c579568377c0b0f8
GraphNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 
= xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_5 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_5 => div, sum_1 # Graph fragment: # %sum_1 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 2048, grid=grid(2048), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf2 # reuse buf7 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 2048, grid=grid(2048), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(buf3, (64, 32), (32, 1), 0), buf6, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GraphNet(nn.Module): def __init__(self, input_size, n_classes, num_neurons=32): super(GraphNet, self).__init__() self.fc1 = nn.Linear(input_size, num_neurons) self.fc2 = nn.Linear(num_neurons, num_neurons) self.fc3 = nn.Linear(num_neurons, n_classes) self.relu = nn.ReLU() self.sigmoid = nn.Softmax(dim=1) def forward(self, x): x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.relu(x) x = self.fc3(x) x = self.sigmoid(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'n_classes': 4}]
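Note that the attribute named sigmoid above actually holds nn.Softmax(dim=1), so for the (4, 4, 4, 4) test input the outputs are normalized along dimension 1. A minimal sketch, assuming the GraphNet class above is in scope (names illustrative):

import torch

net = GraphNet(input_size=4, n_classes=4)
y = net(torch.rand(4, 4, 4, 4))
print(y.shape)        # torch.Size([4, 4, 4, 4])
print(y.sum(dim=1))   # all ~1.0: the softmax normalizes over dim 1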
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 
1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf8, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3, primals_5, buf7, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor( buf3, (64, 32), (32, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class GraphNetNew(nn.Module): def __init__(self, input_size, n_classes, num_neurons=32): super(GraphNetNew, self).__init__() self.fc1 = nn.Linear(input_size, num_neurons) self.fc2 = nn.Linear(num_neurons, num_neurons) self.fc3 = nn.Linear(num_neurons, n_classes) self.relu = nn.ReLU() self.sigmoid = nn.Softmax(dim=1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
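The two softmax kernels above are the standard numerically stable formulation: the first subtracts the per-slice maximum before exponentiating, the second divides by the sum of exponentials. Subtracting a constant leaves the softmax unchanged but avoids float32 overflow, as a small freestanding sketch shows:

import torch

logits = torch.tensor([1000.0, 1001.0, 1002.0])
naive = logits.exp() / logits.exp().sum()     # exp(1000) overflows -> nan
stable = (logits - logits.max()).exp()
stable = stable / stable.sum()
print(naive)    # tensor([nan, nan, nan])
print(stable)   # tensor([0.0900, 0.2447, 0.6652])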
adam2392/dldo
GraphNet
false
12,070
[ "MIT" ]
0
fc57f8700eb048558ab205c2c77a064f1a7cc7f6
https://github.com/adam2392/dldo/tree/fc57f8700eb048558ab205c2c77a064f1a7cc7f6
FCLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/dn/cdnhr6ixjduuhci57kobqjnehjrl22mcyjqzuuhvtxxshy437diy.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_1 => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() 
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class FCLayer(nn.Module): def __init__(self, input_dim, output_dim, dropout_rate=0.0, use_activation=True): super().__init__() self.use_activation = use_activation self.dropout = nn.Dropout(dropout_rate) self.linear = nn.Linear(input_dim, output_dim) self.tanh = nn.Tanh() def forward(self, x): x = self.dropout(x) if self.use_activation: x = self.tanh(x) return self.linear(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
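Note the ordering in forward: dropout, then (optionally) tanh, then the linear layer, so the activation precedes the projection rather than following it. In eval mode dropout is the identity, which the sketch below uses to confirm the data path; it assumes the FCLayer class above is in scope.

import torch

layer = FCLayer(input_dim=4, output_dim=4).eval()  # eval: dropout is a no-op
x = torch.rand(4, 4, 4, 4)
# forward == linear(tanh(x)) when use_activation=True
assert torch.allclose(layer(x), layer.linear(torch.tanh(x)), atol=1e-6)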
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class FCLayerNew(nn.Module): def __init__(self, input_dim, output_dim, dropout_rate=0.0, use_activation=True): super().__init__() self.use_activation = use_activation self.dropout = nn.Dropout(dropout_rate) self.linear = nn.Linear(input_dim, output_dim) self.tanh = nn.Tanh() def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
alexandre-do/r-bert
FCLayer
false
12,071
[ "Apache-2.0" ]
0
4e35bcbb0fe0602e708e18010e2394ebbfb074c4
https://github.com/alexandre-do/r-bert/tree/4e35bcbb0fe0602e708e18010e2394ebbfb074c4
SimpleGCN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/oh/cohgrxe3lxmrv72r6z5fwk7g676tipqeoy4yj23mmym2s32bis5c.py # Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.cat, aten.add] # Source node to ATen node mapping: # output => cat # output_1 => add # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mm_1, %slice_4], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cat, %primals_4), kwargs = {}) triton_poi_fused_add_cat_0 = async_compile.triton('triton_poi_fused_add_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = 
xindex tmp11 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 4, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (2 + (4*x1) + ((-2) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp12 = tmp10 + tmp11 tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm] extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [side_1], Original ATen: [aten.mm] extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 2), (4, 1), 0), out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.cat, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_cat_0.run(buf1, buf0, primals_4, buf2, 16, grid=grid(16), stream=stream0) del buf0 del buf1 del primals_4 return (buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import Parameter import torch.nn import torch.autograd class SimpleGCN(nn.Module): """A simple graph convolution layer, similar to the one defined in Kipf et al. https://arxiv.org/abs/1609.02907 .. note:: If you use this code, please cite the original paper in addition to Kaolin. .. code-block:: @article{kipf2016semi, title={Semi-Supervised Classification with Graph Convolutional Networks}, author={Kipf, Thomas N and Welling, Max}, journal={arXiv preprint arXiv:1609.02907}, year={2016} } """ def __init__(self, in_features, out_features, bias=True): super(SimpleGCN, self).__init__() self.in_features = in_features self.out_features = out_features self.weight1 = Parameter(torch.Tensor(in_features, out_features)) if bias: self.bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 6.0 / math.sqrt(self.weight1.size(1) + self.weight1.size(0)) stdv *= 0.6 self.weight1.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-0.1, 0.1) def forward(self, input, adj): support = torch.mm(input, self.weight1) side_len = max(support.shape[1] // 3, 2) if adj.type() == 'torch.cuda.sparse.FloatTensor': norm = torch.sparse.mm(adj, torch.ones((support.shape[0], 1))) normalized_support = support[:, :side_len] / norm side_1 = torch.sparse.mm(adj, normalized_support) else: side_1 = torch.mm(adj, support[:, :side_len]) side_2 = support[:, side_len:] output = torch.cat((side_1, side_2), dim=1) if self.bias is not None: output = output + self.bias return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
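In the dense branch, adj is multiplied into only the first side_len = max(out_features // 3, 2) columns of the support; the remaining columns bypass the adjacency mixing entirely. A minimal sketch reproducing that split by hand, assuming the SimpleGCN class above is in scope (names illustrative):

import torch

gcn = SimpleGCN(in_features=4, out_features=4)
inp, adj = torch.rand(4, 4), torch.rand(4, 4)

support = inp @ gcn.weight1
side_len = max(support.shape[1] // 3, 2)   # = 2 when out_features == 4
manual = torch.cat((adj @ support[:, :side_len],   # neighborhood-mixed part
                    support[:, side_len:]),        # pass-through part
                   dim=1) + gcn.bias
assert torch.allclose(gcn(inp, adj), manual, atol=1e-6)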
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import Parameter import torch.nn import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp11 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp9 = tl.load(in_ptr1 + (2 + 4 * x1 + (-2 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp12 = tmp10 + tmp11 tl.store(out_ptr0 + x2, tmp12, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 2), (4, 1 ), 0), out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_cat_0[grid(16)](buf1, buf0, primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del buf1 del primals_4 return buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class SimpleGCNNew(nn.Module): """A simple graph convolution layer, similar to the one defined in Kipf et al. https://arxiv.org/abs/1609.02907 .. note:: If you use this code, please cite the original paper in addition to Kaolin. .. 
code-block:: @article{kipf2016semi, title={Semi-Supervised Classification with Graph Convolutional Networks}, author={Kipf, Thomas N and Welling, Max}, journal={arXiv preprint arXiv:1609.02907}, year={2016} } """ def __init__(self, in_features, out_features, bias=True): super(SimpleGCNNew, self).__init__() self.in_features = in_features self.out_features = out_features self.weight1 = Parameter(torch.Tensor(in_features, out_features)) if bias: self.bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 6.0 / math.sqrt(self.weight1.size(1) + self.weight1.size(0)) stdv *= 0.6 self.weight1.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-0.1, 0.1) def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' def forward(self, input_0, input_1): primals_1 = self.weight1 primals_4 = self.bias primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
acivgin1/kaolin
SimpleGCN
false
12,072
[ "ECL-2.0", "Apache-2.0" ]
0
4c4e0098b2cd9a73709c81fea82de03abbd6cdd5
https://github.com/acivgin1/kaolin/tree/4c4e0098b2cd9a73709c81fea82de03abbd6cdd5
DepthwiseSeperableConv1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/o6/co6pflndmsdhmqwe2jfrf4itwvl27ku5p27kydz44oxklfdvmyvc.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1], [2], [1], False, [0], 4), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 5) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = 
tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None) assert_size_stride(buf0, (1, 4, 5), (20, 5, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 20, grid=grid(20), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 5), (0, 5, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 5), (20, 5, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf3, primals_5, 20, grid=grid(20), stream=stream0) del primals_5 return (reinterpret_tensor(buf3, (4, 5), (5, 1), 0), primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class DepthwiseSeperableConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size): super(DepthwiseSeperableConv1d, self).__init__() self.depthwise_conv1d = nn.Conv1d(in_channels, in_channels, kernel_size, groups=in_channels, padding=kernel_size // 2) self.pointwise_conv1d = nn.Conv1d(in_channels, out_channels, 1) def forward(self, x): x = self.depthwise_conv1d(x) x = self.pointwise_conv1d(x) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
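The payoff of the depthwise/pointwise factorization is the parameter count: roughly C_in*k weights for the depthwise stage plus C_in*C_out for the pointwise stage, versus C_in*C_out*k for a dense Conv1d. A hedged sketch, assuming the class above is in scope (names illustrative):

import torch
import torch.nn as nn

sep = DepthwiseSeperableConv1d(in_channels=4, out_channels=4, kernel_size=4)
dense = nn.Conv1d(4, 4, kernel_size=4, padding=2)

count = lambda m: sum(p.numel() for p in m.parameters())
print(count(sep), count(dense))   # 40 vs 68; the gap widens as channels grow

x = torch.rand(2, 4, 8)           # (batch, channels, length)
print(sep(x).shape)               # torch.Size([2, 4, 9]): padding=k//2 with
                                  # an even k lengthens the output by one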
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 5 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None) assert_size_stride(buf0, (1, 4, 5), (20, 5, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(20)](buf1, primals_2, 20, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 5 ), (0, 5, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 5), (20, 5, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(20)](buf3, primals_5, 20, XBLOCK=32, num_warps=1, num_stages=1) del primals_5 return reinterpret_tensor(buf3, (4, 5), (5, 1), 0 ), primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4), ( 16, 4, 1), 0), buf1 class DepthwiseSeperableConv1dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size): super(DepthwiseSeperableConv1dNew, self).__init__() self.depthwise_conv1d = nn.Conv1d(in_channels, in_channels, kernel_size, groups=in_channels, padding=kernel_size // 2) self.pointwise_conv1d = nn.Conv1d(in_channels, out_channels, 1) def forward(self, input_0): primals_1 = self.depthwise_conv1d.weight primals_2 = self.depthwise_conv1d.bias primals_4 = self.pointwise_conv1d.weight primals_5 = self.pointwise_conv1d.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
allenye0119/pytorch-modules
DepthwiseSeperableConv1d
false
12,074
[ "MIT" ]
0
c7683ef63478becca3b79a7498840450da33f468
https://github.com/allenye0119/pytorch-modules/tree/c7683ef63478becca3b79a7498840450da33f468
LRN
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/t4/ct42qpaygn7av2p6rystjl4hk3ybzwp5jyvmk3jaiukfiri3pq65.py # Topologically Sorted Source Nodes: [mul, add, div_2, x], Original ATen: [aten.mul, aten.add, aten.pow, aten.div] # Source node to ATen node mapping: # add => add # div_2 => pow_2 # mul => mul # x => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 1.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1.0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {}) triton_poi_fused_add_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 + tmp2 tmp6 = 0.75 tmp7 = libdevice.pow(tmp5, tmp6) tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, add, div_2, x], Original ATen: [aten.mul, aten.add, aten.pow, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class LRN(nn.Module): def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True ): super(LRN, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if ACROSS_CHANNELS: self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size - 1.0) / 2), 0, 0)) else: self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size - 1.0) / 2)) self.alpha = alpha self.beta = beta def forward(self, x): if self.ACROSS_CHANNELS: div = x.pow(2).unsqueeze(1) div = self.average(div).squeeze(1) div = div.mul(self.alpha).add(1.0).pow(self.beta) else: div = x.pow(2) div = self.average(div) div = div.mul(self.alpha).add(1.0).pow(self.beta) x = x.div(div) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 + tmp2 tmp6 = 0.75 tmp7 = libdevice.pow(tmp5, tmp6) tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class LRNNew(nn.Module): def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True ): super(LRNNew, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if ACROSS_CHANNELS: self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size - 1.0) / 2), 0, 0)) else: self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size - 1.0) / 2)) self.alpha = alpha self.beta = beta def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
anas-awadalla/dissect
LRN
false
12,075
[ "MIT" ]
0
d74e9147731c6160274405a39ab1c98191929269
https://github.com/anas-awadalla/dissect/tree/d74e9147731c6160274405a39ab1c98191929269
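A minimal CPU-side sketch (not part of the record above) of what the fused kernel triton_poi_fused_add_div_mul_pow_0 computes. With the module default local_size=1 the AvgPool3d window covers a single element, so the LRN forward collapses to the elementwise chain x / (alpha * x**2 + 1) ** beta that Inductor fused into one kernel; shapes and constants below are taken from get_inputs() and the kernel body, everything else is illustrative.

import torch

x = torch.rand(4, 4, 4, 4)
alpha, beta = 1.0, 0.75

# Eager-mode LRN with local_size=1: the averaging window is a single
# element, so only the mul/add/pow/div chain remains.
eager = x / (x.pow(2) * alpha + 1.0).pow(beta)

# Literal transcription of the kernel body: tmp1 = x*x, two multiplies
# by the constant 1.0, add 1.0, pow 0.75, divide.
kernel_style = x / torch.pow(x * x * 1.0 * 1.0 + 1.0, 0.75)

assert torch.allclose(eager, kernel_style)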
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/zb/czbtvtaquu6hd4qsgmoykgrs64gmpitzjrr366bcqkjsx3el44xo.py # Topologically Sorted Source Nodes: [sub, add, sqrt, x_1, mul, x_2], Original ATen: [aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # sqrt => sqrt # sub => sub # x_1 => div # x_2 => add_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %expand), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 1e-06), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_2, %div), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_3), kwargs = {}) triton_poi_fused_add_div_mul_sqrt_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = 1e-06 tmp26 = tmp24 + tmp25 tmp27 = libdevice.sqrt(tmp26) tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, add, sqrt, x_1, mul, x_2], Original ATen: [aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_sqrt_sub_0.run(primals_2, primals_1, primals_3, buf0, 16, grid=grid(16), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn import Parameter
from torch.autograd import Variable


class LayerNorm(nn.Module):
    """
    Layer Normalization based on Ba et al.:
    'Layer Normalization'
    https://arxiv.org/pdf/1607.06450.pdf
    """

    def __init__(self, input_size, learnable=True, epsilon=1e-06):
        super(LayerNorm, self).__init__()
        self.input_size = input_size
        self.learnable = learnable
        self.alpha = Tensor(1, input_size).fill_(1)
        self.beta = Tensor(1, input_size).fill_(0)
        self.epsilon = epsilon
        if learnable:
            W = Parameter
        else:
            W = Variable
        self.alpha = W(self.alpha)
        self.beta = W(self.beta)
        self.reset_parameters()

    def reset_parameters(self):
        std = 1.0 / math.sqrt(self.input_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)

    def forward(self, x):
        size = x.size()
        x = x.view(x.size(0), -1)
        x = (x - torch.mean(x, 1).unsqueeze(1).expand_as(x)) / torch.sqrt(
            torch.var(x, 1).unsqueeze(1).expand_as(x) + self.epsilon)
        if self.learnable:
            x = self.alpha.expand_as(x) * x + self.beta.expand_as(x)
        return x.view(size)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import Tensor
import torch.nn as nn
from torch.nn import Parameter
from torch.autograd import Variable

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_sqrt_sub_0(in_ptr0, in_ptr1, in_ptr2,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = 4.0
    tmp10 = tmp8 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp2 - tmp10
    tmp13 = tmp12 * tmp12
    tmp14 = tmp3 - tmp10
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp10
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp7 - tmp10
    tmp21 = tmp20 * tmp20
    tmp22 = tmp19 + tmp21
    tmp23 = 3.0
    tmp24 = tmp22 / tmp23
    tmp25 = 1e-06
    tmp26 = tmp24 + tmp25
    tmp27 = libdevice.sqrt(tmp26)
    tmp28 = tmp11 / tmp27
    tmp29 = tmp0 * tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (1, 4), (4, 1))
    assert_size_stride(primals_3, (1, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_sqrt_sub_0[grid(16)](primals_2,
            primals_1, primals_3, buf0, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del primals_2
        del primals_3
    return buf0, primals_1


class LayerNormNew(nn.Module):
    """
    Layer Normalization based on Ba et al.:
    'Layer Normalization'
    https://arxiv.org/pdf/1607.06450.pdf
    """

    def __init__(self, input_size, learnable=True, epsilon=1e-06):
        super(LayerNormNew, self).__init__()
        self.input_size = input_size
        self.learnable = learnable
        self.alpha = Tensor(1, input_size).fill_(1)
        self.beta = Tensor(1, input_size).fill_(0)
        self.epsilon = epsilon
        if learnable:
            W = Parameter
        else:
            W = Variable
        self.alpha = W(self.alpha)
        self.beta = W(self.beta)
        self.reset_parameters()

    def reset_parameters(self):
        std = 1.0 / math.sqrt(self.input_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)

    def forward(self, input_0):
        primals_2 = self.alpha
        primals_3 = self.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
alex-kj-chin/universal-computation
LayerNorm
false
12,076
[ "MIT" ]
0
a41cc7d685a3e0c56c11bc346c25394464da2e06
https://github.com/alex-kj-chin/universal-computation/tree/a41cc7d685a3e0c56c11bc346c25394464da2e06
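A sketch (not from the record) checking the constants baked into triton_poi_fused_add_div_mul_sqrt_sub_0 above. The kernel divides the sum of squared deviations by 3.0, which is the unbiased n - 1 divisor that torch.var uses by default for input_size=4, and adds the module's default epsilon of 1e-06 before the square root.

import torch

x = torch.rand(4, 4)  # shape from get_inputs() above

mean = x.mean(dim=1, keepdim=True)
var = x.var(dim=1, keepdim=True)  # unbiased by default: divides by n - 1 = 3
normed = (x - mean) / torch.sqrt(var + 1e-06)  # mirrors tmp28 in the kernel

# The kernel's tmp23 = 3.0 is exactly this unbiased divisor.
manual_var = ((x - mean) ** 2).sum(dim=1, keepdim=True) / 3.0
assert torch.allclose(var, manual_var)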
ResidualBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # out => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/m7/cm7fc7g7lxk67xnv3lillroicm7xh2eet3wkukiooko77asy37p7.py # Topologically Sorted Source Nodes: [out_1, out_2, out_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_1 => convolution # out_2 => gt, mul, where # out_3 => _unsafe_index_2, _unsafe_index_3 # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x4 = (xindex // 36) x2 = (xindex // 36) % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 
+ ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x5), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/yl/cyl57twtgf3lzd5sst7snomgtzysir6mpvrzx6jm7k4lxpcq6sru.py # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # out_4 => convolution_1 # out_5 => add # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {}) triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ww/cwwltv55jxxho5gnp34wwe5lqhqcsfoleiys2expo7arrq5gnabb.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # out_1 => convolution # out_2 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 
1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), 
padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1, out_2, out_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d] triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1.run(buf1, primals_3, buf2, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_2.run(buf4, primals_5, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3.run(buf1, primals_3, buf5, 256, grid=grid(256), stream=stream0) del buf1 del primals_3 return (buf4, primals_2, primals_4, buf0, buf2, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class ConvLayer(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride,
            norm=None, bias=True):
        super(ConvLayer, self).__init__()
        reflection_padding = kernel_size // 2
        self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, bias=bias)
        self.norm = norm
        if norm == 'BN':
            self.norm_layer = nn.BatchNorm2d(out_channels)
        elif norm == 'IN':
            self.norm_layer = nn.InstanceNorm2d(out_channels,
                track_running_stats=True)

    def forward(self, x):
        out = self.reflection_pad(x)
        out = self.conv2d(out)
        # apply the configured normalization; note `['BN' or 'IN']`
        # evaluates to `['BN']` and would silently skip the 'IN' case
        if self.norm in ['BN', 'IN']:
            out = self.norm_layer(out)
        return out


class ResidualBlock(nn.Module):

    def __init__(self, channels, norm=None, bias=True):
        super(ResidualBlock, self).__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1,
            bias=bias, norm=norm)
        self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1,
            bias=bias, norm=norm)
        self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        input = x
        out = self.relu(self.conv1(x))
        out = self.conv2(out)
        out = out + input
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x4 = xindex // 36 x2 = xindex // 36 % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x5, tmp7, xmask) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = 
empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1[grid(576)]( buf1, primals_3, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_add_convolution_2[grid(256)](buf4, primals_5, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3[grid(256) ](buf1, primals_3, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1 ) del buf1 del primals_3 return buf4, primals_2, primals_4, buf0, buf2, buf5 class ConvLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, norm =None, bias=True): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = nn.ReflectionPad2d(reflection_padding) self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias) self.norm = norm if norm == 'BN': self.norm_layer = nn.BatchNorm2d(out_channels) elif norm == 'IN': self.norm_layer = nn.InstanceNorm2d(out_channels, track_running_stats=True) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) if self.norm in ['BN', 'IN']: out = self.norm_layer(out) return out class ResidualBlockNew(nn.Module): def __init__(self, channels, norm=None, bias=True): super(ResidualBlockNew, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1, bias=bias, norm=norm) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1, bias=bias, norm=norm) self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, input_0): primals_2 = self.conv1.conv2d.weight primals_3 = self.conv1.conv2d.bias primals_4 = self.conv2.conv2d.weight primals_5 = self.conv2.conv2d.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
alhsu713/fast_blind_video_consistency
ResidualBlock
false
12,078
[ "MIT" ]
0
2037ec5f68a361b926c31b3a12c1cd04e2331797
https://github.com/alhsu713/fast_blind_video_consistency/tree/2037ec5f68a361b926c31b3a12c1cd04e2331797
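A sketch (not part of the record) of the reflection-padding index arithmetic in triton_poi_fused_reflection_pad2d_0 above: for a 4x4 plane padded by 1 on each side, each 6x6 output coordinate is reflected back into [0, 3] and mapped to a flat input offset via 15 - col - 4*row. The check below compares that formula against eager F.pad with mode='reflect'.

import torch
import torch.nn.functional as F

x = torch.arange(16.0).reshape(1, 1, 4, 4)
padded = F.pad(x, (1, 1, 1, 1), mode='reflect')

def src_index(x0, x1):
    # Index expression from the kernel, with x0 the output column
    # (fastest-varying) and x1 the output row.
    return 15 + -1 * abs(-3 + abs(-1 + x0)) + -4 * abs(-3 + abs(-1 + x1))

flat = x.flatten()
for x1 in range(6):
    for x0 in range(6):
        assert padded[0, 0, x1, x0].item() == flat[src_index(x0, x1)].item()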
InnerProductModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/jb/cjbh6emqja7rl4lagss2czst5sqfxza2dkqgllp2soznirengezy.py # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mv] # Source node to ATen node mapping: # linear => mul, sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %primals_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mv_0 = async_compile.triton('triton_poi_fused_mv_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mv_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = 
tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + (x0), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mv] stream0 = get_raw_stream(0) triton_poi_fused_mv_0.run(primals_2, primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 return (reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class InnerProductModel(torch.nn.Module): @staticmethod def is_valid_model_type(model_type): raise NotImplementedError @staticmethod def get_model_from_type(model_type): raise NotImplementedError @property def loss_criterion(self): return torch.nn.MSELoss() def __init__(self, n): super().__init__() self.layer = torch.nn.Linear(n, 1, bias=False) self.layer.weight.data = torch.arange(n, dtype=torch.float32) def forward(self, x): return self.layer(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x0, tmp18, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_mv_0[grid(64)](primals_2, primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 return reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), primals_2 class InnerProductModelNew(torch.nn.Module): @staticmethod def is_valid_model_type(model_type): raise NotImplementedError @staticmethod def get_model_from_type(model_type): raise NotImplementedError @property def loss_criterion(self): return torch.nn.MSELoss() def __init__(self, n): super().__init__() self.layer = torch.nn.Linear(n, 1, bias=False) self.layer.weight.data = torch.arange(n, dtype=torch.float32) def forward(self, input_0): primals_1 = self.layer.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
SheepiesLab/plato
InnerProductModel
false
12,079
[ "Apache-2.0" ]
0
9f5bbfa4b6952d1b3af24be409982d303d54a169
https://github.com/SheepiesLab/plato/tree/9f5bbfa4b6952d1b3af24be409982d303d54a169
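A sketch (not from the record) of the length-4 dot product that triton_poi_fused_mv_0 unrolls: each of the 64 output elements is sum_k x.view(64, 4)[i, k] * w[k], and the result is reinterpreted as (4, 4, 4), matching the Linear(n, 1, bias=False) output with its trailing dimension dropped. The (1, 4) weight view below is an equivalent 2-D form of the model's 1-D arange weight, used only for the eager comparison.

import torch
import torch.nn.functional as F

w = torch.arange(4, dtype=torch.float32)
x = torch.rand(4, 4, 4, 4)

# Unrolled per-element dot product, as in the kernel.
manual = (x.view(64, 4) * w).sum(dim=1).view(4, 4, 4)

# Eager equivalent of the linear layer with the arange weight.
expected = F.linear(x, w.view(1, 4)).squeeze(-1)
assert torch.allclose(manual, expected)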
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ac/cacdwifxdru2eihx3n66wqfym5hjpdo6yxk3gsol5t54xroplkwv.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] # Source node to ATen node mapping: # x => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vp/cvpyw2bjgo55x2ne47dmpi2vmqu5t4eb3wcvjgjcnove3xyj7bcr.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_6), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400, ), (1, )) assert_size_stride(primals_5, (300, 400), (400, 1)) assert_size_stride(primals_6, (300, ), (1, )) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 1600, grid=grid(1600), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf4, primals_6, 1200, grid=grid(1200), stream=stream0) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: 
[aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_8 return (buf6, buf0, buf2, buf4, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Critic(nn.Module): def __init__(self, state_dim, action_dim): super(Critic, self).__init__() self.l1 = nn.Linear(state_dim + action_dim, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, 1) def forward(self, x, u): x = F.relu(self.l1(torch.cat([x, u], 1))) x = F.relu(self.l2(x)) x = self.l3(x) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
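A quick usage sketch for the eager Critic above, wiring the recorded get_init_inputs/get_inputs helpers into a forward pass (CPU is fine for the reference path):

init_args, init_kwargs = get_init_inputs()  # ([], {'state_dim': 4, 'action_dim': 4})
critic = Critic(*init_args, **init_kwargs)
state, action = get_inputs()                # two (4, 4) tensors
q = critic(state, action)                   # cat -> (4, 8) -> 400 -> 300 -> (4, 1)
print(q.shape)                              # torch.Size([4, 1])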
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400,), (1,)) assert_size_stride(primals_5, (300, 400), (400, 1)) assert_size_stride(primals_6, (300,), (1,)) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(1600)](buf2, primals_4, 1600, XBLOCK= 256, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (400, 300), ( 1, 400), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_2[grid(1200)](buf4, primals_6, 1200, XBLOCK= 256, num_warps=4, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) 
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_8 return buf6, buf0, buf2, buf4, primals_7, primals_5 class CriticNew(nn.Module): def __init__(self, state_dim, action_dim): super(CriticNew, self).__init__() self.l1 = nn.Linear(state_dim + action_dim, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, 1) def forward(self, input_0, input_1): primals_3 = self.l1.weight primals_4 = self.l1.bias primals_5 = self.l2.weight primals_6 = self.l2.bias primals_7 = self.l3.weight primals_8 = self.l3.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
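A sketch of a parity check between the eager Critic and the compiled CriticNew above. This assumes a CUDA device (the call graph pins cuda:0) and copies the state dict so both modules share parameters; the tolerance is illustrative:

import torch

torch.manual_seed(0)
eager = Critic(4, 4).cuda()
compiled = CriticNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical l1/l2/l3 parameters

x = torch.rand(4, 4, device='cuda')
u = torch.rand(4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x, u), compiled(x, u), atol=1e-5)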
SheepiesLab/plato
Critic
false
12080
[ "Apache-2.0" ]
0
9f5bbfa4b6952d1b3af24be409982d303d54a169
https://github.com/SheepiesLab/plato/tree/9f5bbfa4b6952d1b3af24be409982d303d54a169
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/cw/ccwppgcg22wuzun4xtln5oajdlq3temaao2gx2o5yvat5eethuvt.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1216 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 19 y1 = (yindex // 19) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (19*x2) + (171*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel 
path: runs/run_shard_9/inductor_cache/fg/cfghwb2hummsru7uqdvqmk4g7qb2jsg56zyr2cbpqefdfrc5jihb.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 76 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 19 y1 = (yindex // 19) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (19*x2) + (77824*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ra/crarmf7s2qf36jg27hprl42qtwcxcnnoyrgzgevtstzj4qgsdzwl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/yd/cydvmxsmzwizyj5fbgjnjeeo27as6zdlft5s5uj57ovvcxtlbfhh.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hc/chc4ngj4mkoo4satyzg4z43rbbkycqq4jvvvdlykndskywnfxqkl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = 
async_compile.triton('triton_poi_fused_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bf/cbf366a2yqsgje7qwk2rwcpp5p5463aubcubwcpy3r4kou4d5acl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 16 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (4096*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ue/cueawvikh6bpxa25cztzwzjpszjzmtrgrdraojxxgavyes2ekekj.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel 
path: runs/run_shard_9/inductor_cache/z7/cz7op4xayabwplmw4lcknylic3pui3f7uqnal7rt56fujxax55dk.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hm/chm5iqtfasmjkzseox33t2dndy7unxmt5p33p4snkvydoeglz3mz.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4194304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hb/chbn2k5z6lk54z2pcj2ybjkyvjods3je3ocnyk23klcsofbc6ew6.py # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_3 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3936256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gk/cgknleucb3y2hxq644yt2wbukhs2pkemi74igp4huxplagtq6gja.py # Topologically Sorted Source Nodes: [conv2d_4, x_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_4 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3686400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel 
path: runs/run_shard_9/inductor_cache/bb/cbb3oregfboycrii4phmw4js37jsuaprlgv3xu5wenemy767ghpw.py # Topologically Sorted Source Nodes: [conv2d_5, x_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_5 => convolution_5 # x_5 => relu_5 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1024 xnumel = 3249 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = (yindex // 256) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (831744*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (3264*y3)), tmp4, xmask) tl.store(out_ptr1 + (y0 + (256*x2) + (831744*y1)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/om/comsuwuzojgebfhuciv6mfraudhjs62qoqztwkiqx7uwl3pnwh3w.py # Topologically Sorted Source Nodes: [conv2d_5, x_5, x_6], Original ATen: [aten.convolution, aten.relu, aten.view] # Source node to ATen node mapping: # 
conv2d_5 => convolution_5 # x_5 => relu_5 # x_6 => view # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_5, [-1, 256]), kwargs = {}) triton_poi_fused_convolution_relu_view_12 = async_compile.triton('triton_poi_fused_convolution_relu_view_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_view_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_view_12(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3326976 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + ((3264*(x0 // 3249)) + (x0 % 3249)), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args args.clear() assert_size_stride(primals_1, (64, 19, 3, 3), (171, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 19, 64, 64), (77824, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (256, ), (1, )) assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (7, 256), (256, 1)) assert_size_stride(primals_15, (7, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((64, 19, 3, 3), (171, 1, 57, 19), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 1216, 9, grid=grid(1216, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 19, 64, 64), (77824, 1, 1216, 19), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 76, 4096, grid=grid(76, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 32768, 9, grid=grid(32768, 9), stream=stream0) del primals_6 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, buf4, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_8 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_10, buf5, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_10 buf6 = empty_strided_cuda((256, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_12, buf6, 65536, 16, grid=grid(65536, 16), stream=stream0) del primals_12 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf8, primals_2, 1048576, grid=grid(1048576), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 128, 64, 64), (524288, 1, 8192, 128)) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_7.run(buf10, primals_5, 2097152, grid=grid(2097152), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf12, primals_7, 4194304, grid=grid(4194304), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(buf12, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 256, 62, 62), 
(984064, 1, 15872, 256)) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf14, primals_9, 3936256, grid=grid(3936256), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf14, buf5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 60, 60), (921600, 1, 15360, 256)) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [conv2d_4, x_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_10.run(buf16, primals_11, 3686400, grid=grid(3686400), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 57, 57), (831744, 1, 14592, 256)) buf18 = empty_strided_cuda((4, 256, 57, 57), (835584, 3264, 57, 1), torch.float32) buf21 = empty_strided_cuda((4, 256, 57, 57), (831744, 1, 14592, 256), torch.bool) # Topologically Sorted Source Nodes: [conv2d_5, x_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_11.run(buf17, primals_13, buf18, buf21, 1024, 3249, grid=grid(1024, 3249), stream=stream0) del primals_13 buf19 = reinterpret_tensor(buf17, (12996, 256), (256, 1), 0); del buf17 # reuse # Topologically Sorted Source Nodes: [conv2d_5, x_5, x_6], Original ATen: [aten.convolution, aten.relu, aten.view] triton_poi_fused_convolution_relu_view_12.run(buf18, buf19, 3326976, grid=grid(3326976), stream=stream0) del buf18 buf20 = empty_strided_cuda((12996, 7), (7, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_15, buf19, reinterpret_tensor(primals_14, (256, 7), (1, 256), 0), alpha=1, beta=1, out=buf20) del primals_15 return (buf20, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf8, buf10, buf12, buf14, buf16, buf19, primals_14, buf21, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 19, 3, 3), (171, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 19, 64, 64), (77824, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_13 = 
rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((7, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((7, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
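Aside: kernels triton_poi_fused_0 through triton_poi_fused_5 in the module above do no arithmetic; they only repack the conv weights and the input into a channels-last-style layout before the extern convolution calls. A sketch of the equivalent transform for the first conv weight, whose target strides (171, 1, 57, 19) match buf0:

import torch

w = torch.randn(64, 19, 3, 3)                   # NCHW-contiguous, strides (171, 9, 3, 1)
w_cl = w.to(memory_format=torch.channels_last)
print(w_cl.stride())                            # (171, 1, 57, 19), the layout buf0 uses
assert torch.equal(w, w_cl)                     # same values, different physical order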
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.a1 = nn.Conv2d(19, 64, kernel_size=3, padding=1) self.a2 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.a3 = nn.Conv2d(128, 256, kernel_size=3, padding=1) self.b1 = nn.Conv2d(256, 256, kernel_size=3, padding=0) self.b2 = nn.Conv2d(256, 256, kernel_size=3, padding=0) self.b3 = nn.Conv2d(256, 256, kernel_size=4, padding=0) self.linear = nn.Linear(256, 7) def forward(self, x): x = F.relu(self.a1(x)) x = F.relu(self.a2(x)) x = F.relu(self.a3(x)) x = F.relu(self.b1(x)) x = F.relu(self.b2(x)) x = F.relu(self.b3(x)) x = x.view(-1, 256) x = self.linear(x) return x def get_inputs(): return [torch.rand([4, 19, 64, 64])] def get_init_inputs(): return [[], {}]
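For orientation, a shape trace through Net on the recorded input: the padded 3x3 convs a1-a3 keep the 64x64 spatial size, b1 and b2 (3x3, no padding) shrink it to 62 and then 60, b3 (4x4, no padding) gives 57, and view(-1, 256) turns the (4, 256, 57, 57) tensor into 4*57*57 = 12996 rows of 256 values. A runnable sketch:

import torch

net = Net()
with torch.no_grad():
    y = net(torch.rand(4, 19, 64, 64))
print(y.shape)  # torch.Size([12996, 7]) -- 12996 rows of 7 logits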
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 1216 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 19 y1 = yindex // 19 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 19 * x2 + 171 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 76 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 19 y1 = yindex // 19 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 19 * x2 + 77824 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 4096 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 3249 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 831744 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 3264 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 831744 * y1), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_view_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3326976 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3264 * (x0 // 3249) + x0 % 3249), xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (64, 19, 3, 3), (171, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 19, 64, 64), (77824, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (7, 256), (256, 1)) assert_size_stride(primals_15, (7,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 19, 3, 3), (171, 1, 57, 19), torch. float32) get_raw_stream(0) triton_poi_fused_0[grid(1216, 9)](primals_1, buf0, 1216, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 19, 64, 64), (77824, 1, 1216, 19), torch.float32) triton_poi_fused_1[grid(76, 4096)](primals_3, buf1, 76, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_2[grid(8192, 9)](primals_4, buf2, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(32768, 9)](primals_6, buf3, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_4[grid(65536, 9)](primals_8, buf4, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_4[grid(65536, 9)](primals_10, buf5, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) triton_poi_fused_5[grid(65536, 16)](primals_12, buf6, 65536, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_6[grid(1048576)](buf8, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf9 = 
extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 128, 64, 64), (524288, 1, 8192, 128)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_7[grid(2097152)](buf10, primals_5, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf11 = extern_kernels.convolution(buf10, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf12 = buf11 del buf11 triton_poi_fused_convolution_relu_8[grid(4194304)](buf12, primals_7, 4194304, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf13 = extern_kernels.convolution(buf12, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 256, 62, 62), (984064, 1, 15872, 256)) buf14 = buf13 del buf13 triton_poi_fused_convolution_relu_9[grid(3936256)](buf14, primals_9, 3936256, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf15 = extern_kernels.convolution(buf14, buf5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 60, 60), (921600, 1, 15360, 256)) buf16 = buf15 del buf15 triton_poi_fused_convolution_relu_10[grid(3686400)](buf16, primals_11, 3686400, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf17 = extern_kernels.convolution(buf16, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 57, 57), (831744, 1, 14592, 256)) buf18 = empty_strided_cuda((4, 256, 57, 57), (835584, 3264, 57, 1), torch.float32) buf21 = empty_strided_cuda((4, 256, 57, 57), (831744, 1, 14592, 256 ), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_11[grid(1024, 3249)](buf17, primals_13, buf18, buf21, 1024, 3249, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_13 buf19 = reinterpret_tensor(buf17, (12996, 256), (256, 1), 0) del buf17 triton_poi_fused_convolution_relu_view_12[grid(3326976)](buf18, buf19, 3326976, XBLOCK=512, num_warps=8, num_stages=1) del buf18 buf20 = empty_strided_cuda((12996, 7), (7, 1), torch.float32) extern_kernels.addmm(primals_15, buf19, reinterpret_tensor( primals_14, (256, 7), (1, 256), 0), alpha=1, beta=1, out=buf20) del primals_15 return (buf20, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf8, buf10, buf12, buf14, buf16, buf19, primals_14, buf21) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.a1 = nn.Conv2d(19, 64, kernel_size=3, padding=1) self.a2 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.a3 = nn.Conv2d(128, 256, kernel_size=3, padding=1) self.b1 = nn.Conv2d(256, 256, kernel_size=3, padding=0) self.b2 = nn.Conv2d(256, 256, kernel_size=3, padding=0) self.b3 = nn.Conv2d(256, 256, kernel_size=4, padding=0) self.linear = nn.Linear(256, 7) def forward(self, input_0): primals_1 = self.a1.weight primals_2 = self.a1.bias primals_4 = self.a2.weight primals_5 = self.a2.bias primals_6 = self.a3.weight primals_7 = self.a3.bias primals_8 = self.b1.weight primals_9 = self.b1.bias primals_10 = self.b2.weight primals_11 = self.b2.bias primals_12 = self.b3.weight primals_13 = self.b3.bias primals_14 = self.linear.weight primals_15 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, 
primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
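Shape sanity check for the compiled network above (a minimal sketch, not part of the generated artifact): the three padded 3x3 convolutions keep the 64x64 grid, the unpadded 3x3, 3x3, and 4x4 convolutions shrink it to 57x57, and the final addmm applies the shared 256->7 linear head per spatial position, so a (4, 19, 64, 64) input yields (12996, 7) logits, with 12996 = 4 * 57 * 57. Assumes a CUDA device, since the generated call() pins all buffers to cuda:0.

import torch

# Hypothetical smoke test; NetNew is the generated class defined in this record.
net = NetNew().cuda()
boards = torch.rand(4, 19, 64, 64, device='cuda')  # the shape asserted for primals_3
logits = net(boards)
assert logits.shape == (12996, 7)  # one 7-way logit row per 4 * 57 * 57 spatial positions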
afozk95/chess-dataset
Net
false
12,081
[ "MIT" ]
0
08de7b251f67cb8553a5ee66f6fd76cefeb14bb4
https://github.com/afozk95/chess-dataset/tree/08de7b251f67cb8553a5ee66f6fd76cefeb14bb4
SimmatModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/mf/cmfacdeh6vjcea6klvpa2izxaljxxu7laxdtuzrkb3pxxcikgp4h.py # Topologically Sorted Source Nodes: [a_denom, b_denom, mul, sim_1], Original ATen: [aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # a_denom => add # b_denom => add_1 # mul => mul # sim_1 => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, 1e-09), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 1e-09), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, %mul), kwargs = {}) triton_poi_fused_add_div_mul_0 = async_compile.triton('triton_poi_fused_add_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (4*x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x4)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x4)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x4)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + (x3), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/oe/coe2zmts3vusxk7vdbhxxsdob7b7ylyunwhmmnrpkk4gsrvsycki.py # Topologically Sorted Source Nodes: [a_denom_1, b_denom_1, mul_1, sim_5], Original ATen: [aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # a_denom_1 => add_2 # b_denom_1 => add_3 # mul_1 => mul_1 # sim_5 => div_1 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_4, 1e-09), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_5, 1e-09), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %add_3), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm_1, %mul_1), kwargs = {}) triton_poi_fused_add_div_mul_1 = async_compile.triton('triton_poi_fused_add_div_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (64 + (4*x4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (65 + (4*x4)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (66 + (4*x4)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (67 + (4*x4)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (64 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (65 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (66 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (67 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + (x3), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ig/ciglo32lpxefzh6pphhumvkwdhe2hlwwfmcdza4qfdmba3wyiazs.py # Topologically Sorted Source Nodes: [a_denom_2, b_denom_2, mul_2, sim_9], Original ATen: [aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # a_denom_2 => add_4 # b_denom_2 => add_5 # mul_2 => mul_2 # sim_9 => div_2 # Graph fragment: # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_8, 1e-09), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_9, 1e-09), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, %add_5), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm_2, %mul_2), kwargs = {}) triton_poi_fused_add_div_mul_2 = async_compile.triton('triton_poi_fused_add_div_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_2', 
'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (128 + (4*x4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (129 + (4*x4)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (130 + (4*x4)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (131 + (4*x4)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (128 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (129 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (130 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (131 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + (x3), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ql/cql7orlq7zxffgocedy5ohfpyddboisasznvye6fjkfq25mxofj6.py # Topologically Sorted Source Nodes: [a_denom_3, b_denom_3, mul_3, sim_13], Original ATen: [aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # a_denom_3 => add_6 # b_denom_3 => add_7 # mul_3 => mul_3 # sim_13 => div_3 # Graph fragment: # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_12, 1e-09), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_13, 1e-09), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, %add_7), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm_3, %mul_3), kwargs = {}) triton_poi_fused_add_div_mul_3 = async_compile.triton('triton_poi_fused_add_div_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, 
major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (192 + (4*x4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (193 + (4*x4)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (194 + (4*x4)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (195 + (4*x4)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (192 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (193 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (194 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (195 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + (x3), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vv/cvv2ti6slkvzgivnm3mq7yh7uljpvgnxqf7osgl7x2objk2vfw5z.py # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] # Source node to ATen node mapping: # stack => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%where_1, %where_3, %where_5, %where_7], 1), kwargs = {}) triton_poi_fused_stack_4 = async_compile.triton('triton_poi_fused_stack_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 
'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 16 x0 = xindex % 4 x2 = (xindex // 64) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = -1.0 tmp7 = tmp5 == tmp6 tmp8 = tl.load(in_ptr1 + ((4*x2) + x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp8 == tmp6 tmp10 = tl.load(in_ptr2 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tl.where(tmp9, tmp11, tmp10) tmp13 = tl.where(tmp7, tmp11, tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp20 == tmp6 tmp22 = tl.load(in_ptr1 + ((4*x2) + ((-4) + x1)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp22 == tmp6 tmp24 = tl.load(in_ptr3 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp19 & xmask, other=0.0) tmp25 = tl.where(tmp23, tmp11, tmp24) tmp26 = tl.where(tmp21, tmp11, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp19, tmp26, tmp27) tmp29 = tmp0 >= tmp17 tmp30 = tl.full([1], 12, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tmp29 & tmp31 tmp33 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tmp33 == tmp6 tmp35 = tl.load(in_ptr1 + ((4*x2) + ((-8) + x1)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tmp35 == tmp6 tmp37 = tl.load(in_ptr4 + (x0 + (4*((-8) + x1)) + (16*x2)), tmp32 & xmask, other=0.0) tmp38 = tl.where(tmp36, tmp11, tmp37) tmp39 = tl.where(tmp34, tmp11, tmp38) tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp32, tmp39, tmp40) tmp42 = tmp0 >= tmp30 tmp43 = tl.full([1], 16, tl.int64) tmp44 = tmp0 < tmp43 tmp45 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tmp45 == tmp6 tmp47 = tl.load(in_ptr1 + ((4*x2) + ((-12) + x1)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tmp47 == tmp6 tmp49 = tl.load(in_ptr5 + (x0 + (4*((-12) + x1)) + (16*x2)), tmp42 & xmask, other=0.0) tmp50 = tl.where(tmp48, tmp11, tmp49) tmp51 = tl.where(tmp46, tmp11, tmp50) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp42, tmp51, tmp52) tmp54 = tl.where(tmp32, tmp41, tmp53) tmp55 = tl.where(tmp19, tmp28, tmp54) tmp56 = tl.where(tmp4, tmp15, tmp55) 
tl.store(out_ptr0 + (x3), tmp56, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(arg3_1, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sim], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [a_denom, b_denom, mul, sim_1], Original ATen: [aten.add, aten.mul, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_0.run(buf1, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sim_4], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 64), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 64), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [a_denom_1, b_denom_1, mul_1, sim_5], Original ATen: [aten.add, aten.mul, aten.div] triton_poi_fused_add_div_mul_1.run(buf3, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sim_8], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 128), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 128), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [a_denom_2, b_denom_2, mul_2, sim_9], Original ATen: [aten.add, aten.mul, aten.div] triton_poi_fused_add_div_mul_2.run(buf5, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sim_12], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 192), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 192), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [a_denom_3, b_denom_3, mul_3, sim_13], Original ATen: [aten.add, aten.mul, aten.div] triton_poi_fused_add_div_mul_3.run(buf7, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf8 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] triton_poi_fused_stack_4.run(arg3_1, arg2_1, buf1, buf3, buf5, buf7, buf8, 256, grid=grid(256), stream=stream0) del arg2_1 del arg3_1 del buf1 del buf3 del buf5 del buf7 return (reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if 
__name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class SimmatModule(torch.nn.Module):

    def __init__(self, padding=-1):
        super().__init__()
        self.padding = padding
        self._hamming_index_loaded = None
        self._hamming_index = None

    def forward(self, query_embed, doc_embed, query_tok, doc_tok):
        simmat = []
        for a_emb, b_emb in zip(query_embed, doc_embed):
            BAT, A, B = a_emb.shape[0], a_emb.shape[1], b_emb.shape[1]
            a_denom = a_emb.norm(p=2, dim=2).reshape(BAT, A, 1).expand(BAT, A, B) + 1e-09
            b_denom = b_emb.norm(p=2, dim=2).reshape(BAT, 1, B).expand(BAT, A, B) + 1e-09
            perm = b_emb.permute(0, 2, 1)
            sim = a_emb.bmm(perm)
            sim = sim / (a_denom * b_denom)
            nul = torch.zeros_like(sim)
            sim = torch.where(query_tok.reshape(BAT, A, 1).expand(BAT, A, B) == self.padding, nul, sim)
            sim = torch.where(doc_tok.reshape(BAT, 1, B).expand(BAT, A, B) == self.padding, nul, sim)
            simmat.append(sim)
        return torch.stack(simmat, dim=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 1]), torch.rand([4, 1, 4])]


def get_init_inputs():
    return [[], {}]
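A quick eager-mode check of the module above, using its own get_inputs() shapes (a sketch; nothing here is part of the record): each embedding layer contributes one (A, B) cosine-similarity map, stabilized with the 1e-09 epsilon on both norms, and positions whose token id equals the padding value (-1) are zeroed out.

import torch

# Eager sanity check for SimmatModule; runs on CPU, no compilation needed.
sim_mod = SimmatModule(padding=-1)
query_embed, doc_embed, query_tok, doc_tok = get_inputs()
simmat = sim_mod(query_embed, doc_embed, query_tok, doc_tok)
assert simmat.shape == (4, 4, 4, 4)  # 4 layers stacked on dim 1, each a 4x4 map per batch item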
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + 4 * x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x4), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x4), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x4), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + x3, tmp29, xmask) @triton.jit def triton_poi_fused_add_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (64 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (65 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp6 = tl.load(in_ptr0 + (66 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (67 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (64 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (65 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (66 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (67 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + x3, tmp29, xmask) @triton.jit def triton_poi_fused_add_div_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): 
xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (128 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (129 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (130 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (131 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (128 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (129 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (130 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (131 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + x3, tmp29, xmask) @triton.jit def triton_poi_fused_add_div_mul_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (192 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (193 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (194 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (195 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (192 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (193 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (194 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (195 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + x3, tmp29, xmask) @triton.jit def triton_poi_fused_stack_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 16 x0 = xindex % 4 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = -1.0 tmp7 = tmp5 == tmp6 tmp8 = tl.load(in_ptr1 + (4 * x2 + x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tmp8 == tmp6 tmp10 = tl.load(in_ptr2 + (x0 
+ 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tl.where(tmp9, tmp11, tmp10) tmp13 = tl.where(tmp7, tmp11, tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp19 & xmask, eviction_policy ='evict_last', other=0.0) tmp21 = tmp20 == tmp6 tmp22 = tl.load(in_ptr1 + (4 * x2 + (-4 + x1)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp22 == tmp6 tmp24 = tl.load(in_ptr3 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp19 & xmask, other=0.0) tmp25 = tl.where(tmp23, tmp11, tmp24) tmp26 = tl.where(tmp21, tmp11, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp19, tmp26, tmp27) tmp29 = tmp0 >= tmp17 tmp30 = tl.full([1], 12, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tmp29 & tmp31 tmp33 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp32 & xmask, eviction_policy ='evict_last', other=0.0) tmp34 = tmp33 == tmp6 tmp35 = tl.load(in_ptr1 + (4 * x2 + (-8 + x1)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tmp35 == tmp6 tmp37 = tl.load(in_ptr4 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp32 & xmask, other=0.0) tmp38 = tl.where(tmp36, tmp11, tmp37) tmp39 = tl.where(tmp34, tmp11, tmp38) tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp32, tmp39, tmp40) tmp42 = tmp0 >= tmp30 tl.full([1], 16, tl.int64) tmp45 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp42 & xmask, eviction_policy ='evict_last', other=0.0) tmp46 = tmp45 == tmp6 tmp47 = tl.load(in_ptr1 + (4 * x2 + (-12 + x1)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tmp47 == tmp6 tmp49 = tl.load(in_ptr5 + (x0 + 4 * (-12 + x1) + 16 * x2), tmp42 & xmask, other=0.0) tmp50 = tl.where(tmp48, tmp11, tmp49) tmp51 = tl.where(tmp46, tmp11, tmp50) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp42, tmp51, tmp52) tmp54 = tl.where(tmp32, tmp41, tmp53) tmp55 = tl.where(tmp19, tmp28, tmp54) tmp56 = tl.where(tmp4, tmp15, tmp55) tl.store(out_ptr0 + x3, tmp56, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(arg3_1, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_div_mul_0[grid(64)](buf1, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 64), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 64), out =buf2) buf3 = buf2 del buf2 triton_poi_fused_add_div_mul_1[grid(64)](buf3, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 128), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 128), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_add_div_mul_2[grid(64)](buf5, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), 
(16, 4, 1), 192), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 192), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_add_div_mul_3[grid(64)](buf7, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf8 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) triton_poi_fused_stack_4[grid(256)](arg3_1, arg2_1, buf1, buf3, buf5, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg2_1 del arg3_1 del buf1 del buf3 del buf5 del buf7 return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), class SimmatModuleNew(torch.nn.Module): def __init__(self, padding=-1): super().__init__() self.padding = padding self._hamming_index_loaded = None self._hamming_index = None def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
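A parity sketch between the eager module and the generated one (assumes a CUDA device, since the compiled call() is pinned to cuda:0; the two should agree up to floating-point reassociation, since both compute bmm / ((||a|| + 1e-09) * (||b|| + 1e-09)) per layer):

import torch

# Hypothetical check that the Inductor output matches eager execution.
inputs = [t.cuda() for t in get_inputs()]
ref = SimmatModule(padding=-1)(*inputs)
out = SimmatModuleNew(padding=-1)(*inputs)
assert torch.allclose(ref, out, atol=1e-5)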
alpers/FlexNeuART
SimmatModule
false
12,082
[ "Apache-2.0" ]
0
2ae263f46b6eb2f1435b9073dad629a2fef23ab9
https://github.com/alpers/FlexNeuART/tree/2ae263f46b6eb2f1435b9073dad629a2fef23ab9
PACRRConvMax2dModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/xw/cxwrk2fodabatoqbpxcl5eb2ifidbtfcttbbx5hhgzol35c2myr4.py # Topologically Sorted Source Nodes: [simmat], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # simmat => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 3, 0, 3], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 7) % 7 x0 = xindex % 7 x2 = (xindex // 49) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = 
tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ch/cchl3etcboj5jw4xggmrhxdnvzhxcggabe4regsbgagybypkwnfy.py # Topologically Sorted Source Nodes: [conv2d, conv, max_1], Original ATen: [aten.convolution, aten.relu, aten.max] # Source node to ATen node mapping: # conv => relu # conv2d => convolution # max_1 => max_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%relu, 1), kwargs = {}) triton_poi_fused_convolution_max_relu_1 = async_compile.triton('triton_poi_fused_convolution_max_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_max_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_max_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp7 = tl.load(in_ptr1 + (1)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp26 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp27 = tl.load(in_ptr1 + (2)) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp45 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp46 = tl.load(in_ptr1 + (3)) tmp47 = tl.broadcast_to(tmp46, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp9 = tmp6 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp11 = tmp5 > tmp10 tmp12 = tmp5 == tmp10 tmp13 = tmp5 != tmp5 tmp14 = tmp10 != tmp10 tmp15 = tmp13 > tmp14 tmp16 = tmp11 | tmp15 tmp17 = tmp13 & tmp14 tmp18 = tmp12 | tmp17 
tmp19 = tl.full([1], 0, tl.int64) tmp20 = tl.full([1], 1, tl.int64) tmp21 = tmp19 < tmp20 tmp22 = tmp18 & tmp21 tmp23 = tmp16 | tmp22 tmp24 = tl.where(tmp23, tmp5, tmp10) tmp25 = tl.where(tmp23, tmp19, tmp20) tmp29 = tmp26 + tmp28 tmp30 = triton_helpers.maximum(tmp4, tmp29) tmp31 = tmp24 > tmp30 tmp32 = tmp24 == tmp30 tmp33 = tmp24 != tmp24 tmp34 = tmp30 != tmp30 tmp35 = tmp33 > tmp34 tmp36 = tmp31 | tmp35 tmp37 = tmp33 & tmp34 tmp38 = tmp32 | tmp37 tmp39 = tl.full([1], 2, tl.int64) tmp40 = tmp25 < tmp39 tmp41 = tmp38 & tmp40 tmp42 = tmp36 | tmp41 tmp43 = tl.where(tmp42, tmp24, tmp30) tmp44 = tl.where(tmp42, tmp25, tmp39) tmp48 = tmp45 + tmp47 tmp49 = triton_helpers.maximum(tmp4, tmp48) tmp50 = tmp43 > tmp49 tmp51 = tmp43 == tmp49 tmp52 = tmp43 != tmp43 tmp53 = tmp49 != tmp49 tmp54 = tmp52 > tmp53 tmp55 = tmp50 | tmp54 tmp56 = tmp52 & tmp53 tmp57 = tmp51 | tmp56 tmp58 = tl.full([1], 3, tl.int64) tmp59 = tmp44 < tmp58 tmp60 = tmp57 & tmp59 tmp61 = tmp55 | tmp60 tmp62 = tl.where(tmp61, tmp43, tmp49) tmp63 = tl.where(tmp61, tmp44, tmp58) tmp64 = triton_helpers.maximum(tmp5, tmp10) tmp65 = triton_helpers.maximum(tmp64, tmp30) tmp66 = triton_helpers.maximum(tmp65, tmp49) tl.store(out_ptr0 + (x2), tmp63, xmask) tl.store(out_ptr1 + (x2), tmp66, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/35/c35vxy5oullorpkglb324ij376h6o5mbjsqjoykh7yiskiyinasn.py # Topologically Sorted Source Nodes: [conv2d, conv], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv => relu # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': 
False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [simmat], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, conv, max_1], Original ATen: [aten.convolution, aten.relu, aten.max] triton_poi_fused_convolution_max_relu_1.run(buf1, primals_3, buf2, buf3, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [conv2d, conv, max_1, topk], Original ATen: [aten.convolution, aten.relu, aten.max, aten.topk] buf4 = torch.ops.aten.topk.default(buf3, 4, 2) del buf3 buf5 = buf4[0] buf6 = buf4[1] del buf4 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d, conv], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf1, primals_3, buf7, 256, grid=grid(256), stream=stream0) del buf1 del primals_3 return (buf5, primals_2, buf0, buf6, reinterpret_tensor(buf2, (4, 1, 4, 4), (16, 16, 4, 1), 0), buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class PACRRConvMax2dModule(torch.nn.Module):

    def __init__(self, shape, n_filters, k, channels):
        super().__init__()
        self.shape = shape
        if shape != 1:
            self.pad = torch.nn.ConstantPad2d((0, shape - 1, 0, shape - 1), 0)
        else:
            self.pad = None
        self.conv = torch.nn.Conv2d(channels, n_filters, shape)
        self.activation = torch.nn.ReLU()
        self.k = k
        self.shape = shape
        self.channels = channels

    def forward(self, simmat):
        BATCH, _CHANNELS, QLEN, DLEN = simmat.shape
        if self.pad:
            simmat = self.pad(simmat)
        conv = self.activation(self.conv(simmat))
        top_filters, _ = conv.max(dim=1)
        if DLEN < self.k:
            top_filters = torch.nn.functional.pad(top_filters, (0, self.k - DLEN))
        top_toks, _ = top_filters.topk(self.k, dim=2)
        result = top_toks.reshape(BATCH, QLEN, self.k)
        return result


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'shape': 4, 'n_filters': 4, 'k': 4, 'channels': 4}]
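A minimal eager check of the module above with its own get_init_inputs() configuration (a sketch; with shape=4 the right/bottom padding of 3 keeps the conv output at QLEN x DLEN, the channel-wise max collapses the n_filters dimension, and topk keeps the k strongest doc positions per query position):

import torch

# Eager sanity check for PACRRConvMax2dModule; runs on CPU.
conv_max = PACRRConvMax2dModule(shape=4, n_filters=4, k=4, channels=4)
simmat, = get_inputs()
out = conv_max(simmat)
assert out.shape == (4, 4, 4)  # (BATCH, QLEN, k)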
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_max_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp26 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr1 + 2) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp45 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp46 = tl.load(in_ptr1 + 3) tmp47 = tl.broadcast_to(tmp46, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp9 = tmp6 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp11 = tmp5 > tmp10 tmp12 = tmp5 == tmp10 tmp13 = tmp5 != tmp5 tmp14 = tmp10 != tmp10 tmp15 = tmp13 > tmp14 tmp16 = tmp11 | tmp15 tmp17 = tmp13 & tmp14 tmp18 = tmp12 | tmp17 tmp19 = tl.full([1], 0, tl.int64) tmp20 = tl.full([1], 1, tl.int64) tmp21 = tmp19 < tmp20 tmp22 = tmp18 & tmp21 tmp23 = tmp16 | tmp22 tmp24 = tl.where(tmp23, tmp5, tmp10) tmp25 = tl.where(tmp23, tmp19, tmp20) tmp29 = tmp26 + tmp28 tmp30 = triton_helpers.maximum(tmp4, tmp29) tmp31 = tmp24 > tmp30 tmp32 = tmp24 == tmp30 tmp33 = tmp24 != tmp24 tmp34 = tmp30 != tmp30 tmp35 = tmp33 > tmp34 tmp36 = tmp31 | tmp35 tmp37 = tmp33 & tmp34 tmp38 = tmp32 | tmp37 tmp39 = tl.full([1], 2, tl.int64) tmp40 = tmp25 < tmp39 tmp41 = tmp38 & tmp40 tmp42 = tmp36 | tmp41 tmp43 = tl.where(tmp42, tmp24, tmp30) tmp44 = tl.where(tmp42, tmp25, tmp39) tmp48 = tmp45 + tmp47 tmp49 = triton_helpers.maximum(tmp4, tmp48) tmp50 = tmp43 > tmp49 tmp51 = tmp43 == tmp49 tmp52 = tmp43 != tmp43 tmp53 = tmp49 != tmp49 tmp54 = tmp52 > tmp53 tmp55 = tmp50 | tmp54 tmp56 = tmp52 & tmp53 tmp57 = tmp51 | tmp56 tmp58 = tl.full([1], 3, tl.int64) tmp59 = tmp44 < tmp58 tmp60 = tmp57 & tmp59 tmp61 = tmp55 | tmp60 tl.where(tmp61, tmp43, tmp49) tmp63 = tl.where(tmp61, tmp44, tmp58) tmp64 = triton_helpers.maximum(tmp5, tmp10) tmp65 = triton_helpers.maximum(tmp64, tmp30) tmp66 = triton_helpers.maximum(tmp65, tmp49) tl.store(out_ptr0 + x2, tmp63, xmask) tl.store(out_ptr1 + x2, tmp66, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_convolution_max_relu_1[grid(64)](buf1, primals_3, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = torch.ops.aten.topk.default(buf3, 4, 2) del buf3 buf5 = buf4[0] buf6 = buf4[1] del buf4 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf1, primals_3, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_3 return buf5, primals_2, buf0, buf6, reinterpret_tensor(buf2, (4, 1, 4, 4), (16, 16, 4, 1), 0), buf7 class PACRRConvMax2dModuleNew(torch.nn.Module): def __init__(self, shape, n_filters, k, channels): super().__init__() self.shape = shape if shape != 1: self.pad = torch.nn.ConstantPad2d((0, shape - 1, 0, shape - 1), 0) else: self.pad = None self.conv = torch.nn.Conv2d(channels, n_filters, shape) self.activation = torch.nn.ReLU() self.k = k self.shape = shape self.channels = channels def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
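A plain-torch re-derivation of what triton_poi_fused_constant_pad_nd_0 materializes, assuming only the standard functional API: the kernel zero-pads each 4x4 map to 7x7 on the right and bottom, i.e. exactly ConstantPad2d((0, 3, 0, 3), 0).

import torch

x = torch.rand(4, 4, 4, 4)
padded = torch.nn.functional.pad(x, (0, 3, 0, 3))   # right/bottom zero padding
assert padded.shape == (4, 4, 7, 7)                 # matches buf0's shape above
torch.testing.assert_close(padded[..., :4, :4], x)  # source sits in the top-left block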
alpers/FlexNeuART
PACRRConvMax2dModule
false
12083
[ "Apache-2.0" ]
0
2ae263f46b6eb2f1435b9073dad629a2fef23ab9
https://github.com/alpers/FlexNeuART/tree/2ae263f46b6eb2f1435b9073dad629a2fef23ab9
AverageAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/75/c75w3rgnfmm4c7hp5div65urlkb5kzh2656pt75swmio7vzn3vp3.py # Topologically Sorted Source Nodes: [ones, triangle, mask], Original ATen: [aten.ones, aten.tril, aten.mul] # Source node to ATen node mapping: # mask => mul_1 # ones => full_default # triangle => full_default_1, le, sub, where # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%sub, 0), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %full_default, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %permute), kwargs = {}) triton_poi_fused_mul_ones_tril_0 = async_compile.triton('triton_poi_fused_mul_ones_tril_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_ones_tril_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_ones_tril_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 + ((-1)*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 <= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = 1 + x1 tmp7 = tmp6.to(tl.float32) tmp8 = tmp3 / tmp7 tmp9 = tmp5 * tmp8 tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ha/chavpwdtejkyqus2olvrr56v6fhdolpm5dx6l26ahmwfvz664fnv.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %bmm], -1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bj/cbjkk5x2yiy67l3q4l7ooe5u7plvwkualpweocfe25rsydr62zek.py # Topologically Sorted Source Nodes: [sigmoid, mul_1, sigmoid_1, mul_2, gating_outputs_1], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.sigmoid_backward] # Source node to ATen node mapping: # gating_outputs_1 => add_1 # mul_1 => mul_2 # mul_2 => mul_3 # sigmoid => sigmoid # sigmoid_1 => sigmoid_1 # Graph fragment: # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_1), kwargs = {}) # %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %bmm), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_2), kwargs = {}) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp4 = 
tl.load(in_ptr2 + (x2), xmask) tmp6 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tmp8 = tmp6 + tmp7 tmp9 = tl.sigmoid(tmp8) tmp11 = tmp9 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = 1.0 tmp14 = tmp13 - tmp9 tmp15 = tmp9 * tmp14 tmp16 = tmp13 - tmp3 tmp17 = tmp3 * tmp16 tl.store(out_ptr0 + (x2), tmp12, xmask) tl.store(out_ptr1 + (x2), tmp15, xmask) tl.store(out_ptr2 + (x2), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 8), (8, 1)) assert_size_stride(primals_3, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [ones, triangle, mask], Original ATen: [aten.ones, aten.tril, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_ones_tril_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [average_outputs], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (0, 4, 1), 0), primals_1, out=buf1) del buf0 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_1, buf1, buf2, 128, grid=grid(128), stream=stream0) buf3 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 8), (1, 8), 0), out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul_1, sigmoid_1, mul_2, gating_outputs_1], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.sigmoid_backward] triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2.run(buf3, primals_3, primals_1, buf1, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) del buf3 del primals_3 return (buf4, buf1, primals_1, buf1, reinterpret_tensor(buf2, (16, 8), (8, 1), 0), buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
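The fused mask kernel above never materializes the ones/tril intermediates; a sketch of the same arithmetic in plain torch, assuming inputs_len = 4 as in the size asserts:

import torch

# Row t of the mask holds t + 1 copies of 1 / (t + 1): running-average weights.
mask = torch.tril(torch.ones(4, 4)) / torch.arange(1.0, 5.0).view(4, 1)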
import torch import torch.nn as nn import torch.cuda import torch.distributed class PositionwiseFeedForward(nn.Module): """ A two-layer Feed-Forward-Network with residual layer norm. Args: d_model (int): the size of input for the first-layer of the FFN. d_ff (int): the hidden layer size of the second-layer of the FNN. dropout (float): dropout probability in :math:`[0, 1)`. """ def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) self.dropout_1 = nn.Dropout(dropout) self.relu = nn.ReLU() self.dropout_2 = nn.Dropout(dropout) def forward(self, x): """Layer definition. Args: x: ``(batch_size, input_len, model_dim)`` Returns: (FloatTensor): Output ``(batch_size, input_len, model_dim)``. """ inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x)))) output = self.dropout_2(self.w_2(inter)) return output + x def update_dropout(self, dropout): self.dropout_1.p = dropout self.dropout_2.p = dropout class AverageAttention(nn.Module): """ Average Attention module from "Accelerating Neural Transformer via an Average Attention Network" :cite:`DBLP:journals/corr/abs-1805-00631`. Args: model_dim (int): the dimension of keys/values/queries, must be divisible by head_count dropout (float): dropout parameter """ def __init__(self, model_dim, dropout=0.1, aan_useffn=False): self.model_dim = model_dim self.aan_useffn = aan_useffn super(AverageAttention, self).__init__() if aan_useffn: self.average_layer = PositionwiseFeedForward(model_dim, model_dim, dropout) self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2) def cumulative_average_mask(self, batch_size, inputs_len, device): """ Builds the mask to compute the cumulative average as described in :cite:`DBLP:journals/corr/abs-1805-00631` -- Figure 3 Args: batch_size (int): batch size inputs_len (int): length of the inputs Returns: (FloatTensor): * A Tensor of shape ``(batch_size, input_len, input_len)`` """ triangle = torch.tril(torch.ones(inputs_len, inputs_len, dtype= torch.float, device=device)) weights = torch.ones(1, inputs_len, dtype=torch.float, device=device ) / torch.arange(1, inputs_len + 1, dtype=torch.float, device= device) mask = triangle * weights.transpose(0, 1) return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len) def cumulative_average(self, inputs, mask_or_step, layer_cache=None, step=None): """ Computes the cumulative average as described in :cite:`DBLP:journals/corr/abs-1805-00631` -- Equations (1) (5) (6) Args: inputs (FloatTensor): sequence to average ``(batch_size, input_len, dimension)`` mask_or_step: if cache is set, this is assumed to be the current step of the dynamic decoding. Otherwise, it is the mask matrix used to compute the cumulative average. layer_cache: a dictionary containing the cumulative average of the previous step. Returns: a tensor of the same shape and type as ``inputs``. 
""" if layer_cache is not None: step = mask_or_step average_attention = (inputs + step * layer_cache['prev_g']) / (step + 1) layer_cache['prev_g'] = average_attention return average_attention else: mask = mask_or_step return torch.matmul(mask, inputs) def forward(self, inputs, mask=None, layer_cache=None, step=None): """ Args: inputs (FloatTensor): ``(batch_size, input_len, model_dim)`` Returns: (FloatTensor, FloatTensor): * gating_outputs ``(batch_size, input_len, model_dim)`` * average_outputs average attention ``(batch_size, input_len, model_dim)`` """ batch_size = inputs.size(0) inputs_len = inputs.size(1) average_outputs = self.cumulative_average(inputs, self. cumulative_average_mask(batch_size, inputs_len, inputs.device) if layer_cache is None else step, layer_cache=layer_cache) if self.aan_useffn: average_outputs = self.average_layer(average_outputs) gating_outputs = self.gating_layer(torch.cat((inputs, average_outputs), -1)) input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2) gating_outputs = torch.sigmoid(input_gate) * inputs + torch.sigmoid( forget_gate) * average_outputs return gating_outputs, average_outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'model_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_ones_tril_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 + -1 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 <= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = 1 + x1 tmp7 = tmp6.to(tl.float32) tmp8 = tmp3 / tmp7 tmp9 = tmp5 * tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x2, xmask) tmp6 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tmp8 = tmp6 + tmp7 tmp9 = tl.sigmoid(tmp8) tmp11 = tmp9 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = 1.0 tmp14 = tmp13 - tmp9 tmp15 = tmp9 * tmp14 tmp16 = tmp13 - tmp3 tmp17 = tmp3 * tmp16 tl.store(out_ptr0 + x2, tmp12, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) tl.store(out_ptr2 + x2, tmp17, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 8), (8, 1)) assert_size_stride(primals_3, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_ones_tril_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (0, 4, 1), 0 ), primals_1, out=buf1) del buf0 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_1[grid(128)](primals_1, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((16, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 8), (1, 8), 0), out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2[grid(64)](buf3, primals_3, primals_1, buf1, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 del primals_3 return buf4, buf1, primals_1, buf1, reinterpret_tensor(buf2, (16, 8), ( 8, 1), 0), buf5, buf6 class PositionwiseFeedForward(nn.Module): """ A two-layer Feed-Forward-Network with residual layer norm. Args: d_model (int): the size of input for the first-layer of the FFN. d_ff (int): the hidden layer size of the second-layer of the FNN. dropout (float): dropout probability in :math:`[0, 1)`. """ def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) self.dropout_1 = nn.Dropout(dropout) self.relu = nn.ReLU() self.dropout_2 = nn.Dropout(dropout) def forward(self, x): """Layer definition. Args: x: ``(batch_size, input_len, model_dim)`` Returns: (FloatTensor): Output ``(batch_size, input_len, model_dim)``. """ inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x)))) output = self.dropout_2(self.w_2(inter)) return output + x def update_dropout(self, dropout): self.dropout_1.p = dropout self.dropout_2.p = dropout class AverageAttentionNew(nn.Module): """ Average Attention module from "Accelerating Neural Transformer via an Average Attention Network" :cite:`DBLP:journals/corr/abs-1805-00631`. 
Args: model_dim (int): the dimension of keys/values/queries, must be divisible by head_count dropout (float): dropout parameter """ def __init__(self, model_dim, dropout=0.1, aan_useffn=False): self.model_dim = model_dim self.aan_useffn = aan_useffn super(AverageAttentionNew, self).__init__() if aan_useffn: self.average_layer = PositionwiseFeedForward(model_dim, model_dim, dropout) self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2) def cumulative_average_mask(self, batch_size, inputs_len, device): """ Builds the mask to compute the cumulative average as described in :cite:`DBLP:journals/corr/abs-1805-00631` -- Figure 3 Args: batch_size (int): batch size inputs_len (int): length of the inputs Returns: (FloatTensor): * A Tensor of shape ``(batch_size, input_len, input_len)`` """ triangle = torch.tril(torch.ones(inputs_len, inputs_len, dtype= torch.float, device=device)) weights = torch.ones(1, inputs_len, dtype=torch.float, device=device ) / torch.arange(1, inputs_len + 1, dtype=torch.float, device= device) mask = triangle * weights.transpose(0, 1) return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len) def cumulative_average(self, inputs, mask_or_step, layer_cache=None, step=None): """ Computes the cumulative average as described in :cite:`DBLP:journals/corr/abs-1805-00631` -- Equations (1) (5) (6) Args: inputs (FloatTensor): sequence to average ``(batch_size, input_len, dimension)`` mask_or_step: if cache is set, this is assumed to be the current step of the dynamic decoding. Otherwise, it is the mask matrix used to compute the cumulative average. layer_cache: a dictionary containing the cumulative average of the previous step. Returns: a tensor of the same shape and type as ``inputs``. """ if layer_cache is not None: step = mask_or_step average_attention = (inputs + step * layer_cache['prev_g']) / (step + 1) layer_cache['prev_g'] = average_attention return average_attention else: mask = mask_or_step return torch.matmul(mask, inputs) def forward(self, input_0): primals_2 = self.gating_layer.weight primals_3 = self.gating_layer.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
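A sketch of the incremental-decoding path, assuming the eager AverageAttention from this record: feeding one position at a time through layer_cache should reproduce the mask-based batch computation.

import torch

aa = AverageAttention(model_dim=4)
x = torch.rand(1, 5, 4)
batch_avg = aa.cumulative_average(x, aa.cumulative_average_mask(1, 5, x.device))
cache = {'prev_g': torch.zeros(1, 1, 4)}
step_avg = torch.cat([aa.cumulative_average(x[:, t:t + 1], t, layer_cache=cache)
                      for t in range(5)], dim=1)
torch.testing.assert_close(batch_avg, step_avg)  # same running means either way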
Zer0-dev115/OpenNMT-py
AverageAttention
false
12084
[ "MIT" ]
0
028c76b34779223ee6b3eb224b99617552987100
https://github.com/Zer0-dev115/OpenNMT-py/tree/028c76b34779223ee6b3eb224b99617552987100
BatchLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/bh/cbhpcgxjg3mwo4dulstw5ie26none2yzi5sysdzl34cu6pyah4fg.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add, aten.view] # Source node to ATen node mapping: # output_1 => add, view_3 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %unsqueeze), kwargs = {}) # %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {}) triton_poi_fused_add_view_0 = async_compile.triton('triton_poi_fused_add_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) 
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add, aten.view] stream0 = get_raw_stream(0) triton_poi_fused_add_view_0.run(buf2, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from collections import OrderedDict class MetaModule(nn.Module): """ Base class for PyTorch meta-learning modules. These modules accept an additional argument `params` in their `forward` method. Notes ----- Objects inherited from `MetaModule` are fully compatible with PyTorch modules from `torch.nn.Module`. The argument `params` is a dictionary of tensors, with full support of the computation graph (for differentiation). """ def meta_named_parameters(self, prefix='', recurse=True): gen = self._named_members(lambda module: module._parameters.items() if isinstance(module, MetaModule) else [], prefix=prefix, recurse= recurse) for elem in gen: yield elem def meta_parameters(self, recurse=True): for name, param in self.meta_named_parameters(recurse=recurse): yield param class BatchLinear(nn.Linear, MetaModule): """A linear meta-layer that can deal with batched weight matrices and biases, as for instance output by a hypernetwork.""" __doc__ = nn.Linear.__doc__ def forward(self, input, params=None): if params is None: params = OrderedDict(self.named_parameters()) bias = params.get('bias', None) weight = params['weight'] output = input.matmul(weight.permute(*[i for i in range(len(weight. shape) - 2)], -1, -2)) output += bias.unsqueeze(-2) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
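A minimal sketch of the two call paths, assuming BatchLinear as defined above; the batched weight/bias shapes are illustrative of what a hypernetwork might emit, not taken from the source repo.

from collections import OrderedDict
import torch

layer = BatchLinear(in_features=4, out_features=4)
x = torch.rand(2, 3, 4)
y = layer(x)  # default path: the layer's own weight and bias

# Hypernetwork path: per-sample weights (2, out, in) and biases (2, out);
# the permute of the last two dims makes matmul act like x @ W^T per sample.
params = OrderedDict(weight=torch.rand(2, 4, 4), bias=torch.rand(2, 4))
y_batched = layer(x, params=params)  # -> (2, 3, 4)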
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_view_0[grid(256)](buf2, primals_2, 256, XBLOCK =256, num_warps=4, num_stages=1) del primals_2 return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class MetaModule(nn.Module): """ Base class for PyTorch meta-learning modules. These modules accept an additional argument `params` in their `forward` method. Notes ----- Objects inherited from `MetaModule` are fully compatible with PyTorch modules from `torch.nn.Module`. The argument `params` is a dictionary of tensors, with full support of the computation graph (for differentiation). """ def meta_named_parameters(self, prefix='', recurse=True): gen = self._named_members(lambda module: module._parameters.items() if isinstance(module, MetaModule) else [], prefix=prefix, recurse= recurse) for elem in gen: yield elem def meta_parameters(self, recurse=True): for name, param in self.meta_named_parameters(recurse=recurse): yield param class BatchLinearNew(nn.Linear, MetaModule): """A linear meta-layer that can deal with batched weight matrices and biases, as for instance output by a hypernetwork.""" __doc__ = nn.Linear.__doc__ def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
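The compiled call reduces the layer to one flattening, one mm against the transposed weight, and a fused broadcast bias add; a plain-torch sketch of the same algebra (illustrative, CPU-only):

import torch

x, w, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4), torch.rand(4)
ref = torch.nn.functional.linear(x, w, b)
flat = x.reshape(64, 4).mm(w.t()).add(b).reshape(4, 4, 4, 4)
torch.testing.assert_close(ref, flat)  # same result the fused path computes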
aneesh-dandime/siren
BatchLinear
false
12,085
[ "MIT" ]
0
7bc652e32d66c5792d24e8df2fffa565157679bd
https://github.com/aneesh-dandime/siren/tree/7bc652e32d66c5792d24e8df2fffa565157679bd
BertTextPooler
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py # Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone] # Source node to ATen node mapping: # pooled_output => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/xk/cxkfjvxcrwrocrik25vel4gb2spp4jrbijo33ra4mgkw3hn2qgah.py # Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # pooled_output => add # pooled_output_1 => relu # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.mm] 
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_1.run(buf2, primals_3, buf3, 64, grid=grid(64), stream=stream0) del primals_3 return (buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class BertTextPooler(nn.Module): def __init__(self, config): super(BertTextPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.bi_hidden_size) self.activation = nn.ReLU() def forward(self, hidden_states): first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, bi_hidden_size=4)}]
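A minimal usage sketch, assuming any object exposing hidden_size and bi_hidden_size attributes can stand in for the BERT config (as _mock_config does above); the pooler takes the first token and applies dense + ReLU.

from types import SimpleNamespace
import torch

config = SimpleNamespace(hidden_size=4, bi_hidden_size=4)
pooler = BertTextPooler(config)
hidden_states = torch.rand(2, 7, 4)  # (batch, seq_len, hidden)
pooled = pooler(hidden_states)       # ReLU(dense(hidden_states[:, 0]))
assert pooled.shape == (2, 4)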
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_1[grid(64)](buf2, primals_3, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3 class BertTextPoolerNew(nn.Module): def __init__(self, config): super(BertTextPoolerNew, self).__init__() self.dense = nn.Linear(config.hidden_size, config.bi_hidden_size) self.activation = nn.ReLU() def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
amitakamath/vilbert-multi-task
BertTextPooler
false
12,086
[ "MIT" ]
0
5a11b8265fab3598fcdcd7f7c33453b914d8ff2c
https://github.com/amitakamath/vilbert-multi-task/tree/5a11b8265fab3598fcdcd7f7c33453b914d8ff2c
IOUloss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/in/cinwj7sv5v7wr35grnxco6x2u333pkogkh4ixr63euwbqwyfjen7.py # Topologically Sorted Source Nodes: [area_p, area_g, add_2, truediv_2, add, truediv_3, add_1, br, truediv, sub, truediv_1, sub_1, tl, sub_2, prod_3, lt, type_1, en, area_i, area_u, add_3, iou, pow_1, loss], Original ATen: [aten.prod, aten.add, aten.div, aten.minimum, aten.sub, aten.maximum, aten.lt, aten._to_copy, aten.mul, aten.pow, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # area_g => prod_1 # area_i => mul # area_p => prod # area_u => sub_3 # br => minimum # en => prod_2 # iou => div_4 # loss => sub_4 # lt => lt # pow_1 => pow_1 # prod_3 => prod_3 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # tl => maximum # truediv => div # truediv_1 => div_1 # truediv_2 => div_2 # truediv_3 => div_3 # type_1 => convert_element_type # Graph fragment: # %prod : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%slice_18, 1), kwargs = {}) # %prod_1 : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%slice_20, 1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%prod, %prod_1), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_12, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_10, %div_2), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_16, 2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_14, %div_3), kwargs = {}) # %minimum : [num_users=2] = call_function[target=torch.ops.aten.minimum.default](args = (%add, %add_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_4, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_2, %div), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_8, 2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_6, %div_1), kwargs = 
{}) # %maximum : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%sub, %sub_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %maximum), kwargs = {}) # %prod_3 : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%sub_2, 1), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Tensor](args = (%maximum, %minimum), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt, torch.float32), kwargs = {}) # %prod_2 : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%convert_element_type, 1), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%prod_3, %prod_2), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %mul), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_3, 1e-16), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_3), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div_4, 2), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %pow_1), kwargs = {}) triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0 = async_compile.triton('triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp6 
= tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp6 * tmp2 tmp8 = tmp5 + tmp7 tmp9 = triton_helpers.minimum(tmp4, tmp8) tmp10 = tmp0 - tmp3 tmp11 = tmp5 - tmp7 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp9 - tmp12 tmp16 = tmp15 * tmp2 tmp17 = tmp14 + tmp16 tmp20 = tmp19 * tmp2 tmp21 = tmp18 + tmp20 tmp22 = triton_helpers.minimum(tmp17, tmp21) tmp23 = tmp14 - tmp16 tmp24 = tmp18 - tmp20 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp26 = tmp22 - tmp25 tmp27 = tmp13 * tmp26 tmp28 = tmp12 < tmp9 tmp29 = tmp28.to(tl.float32) tmp30 = tmp25 < tmp22 tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 * tmp31 tmp33 = tmp27 * tmp32 tmp34 = tmp1 * tmp15 tmp35 = tmp6 * tmp19 tmp36 = tmp34 + tmp35 tmp37 = tmp36 - tmp33 tmp38 = 1e-16 tmp39 = tmp37 + tmp38 tmp40 = tmp33 / tmp39 tmp41 = tmp40 * tmp40 tmp42 = 1.0 tmp43 = tmp42 - tmp41 tl.store(in_out_ptr0 + (x0), tmp43, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, ), (1, ), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [area_p, area_g, add_2, truediv_2, add, truediv_3, add_1, br, truediv, sub, truediv_1, sub_1, tl, sub_2, prod_3, lt, type_1, en, area_i, area_u, add_3, iou, pow_1, loss], Original ATen: [aten.prod, aten.add, aten.div, aten.minimum, aten.sub, aten.maximum, aten.lt, aten._to_copy, aten.mul, aten.pow, aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0.run(buf1, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class IOUloss(nn.Module): def __init__(self, reduction='none', loss_type='iou'): super(IOUloss, self).__init__() self.reduction = reduction self.loss_type = loss_type def forward(self, pred, target): assert pred.shape[0] == target.shape[0] pred = pred.view(-1, 4) target = target.view(-1, 4) tl = torch.max(pred[:, :2] - pred[:, 2:] / 2, target[:, :2] - target[:, 2:] / 2) br = torch.min(pred[:, :2] + pred[:, 2:] / 2, target[:, :2] + target[:, 2:] / 2) area_p = torch.prod(pred[:, 2:], 1) area_g = torch.prod(target[:, 2:], 1) en = (tl < br).type(tl.type()).prod(dim=1) area_i = torch.prod(br - tl, 1) * en area_u = area_p + area_g - area_i iou = area_i / (area_u + 1e-16) if self.loss_type == 'iou': loss = 1 - iou ** 2 elif self.loss_type == 'giou': c_tl = torch.min(pred[:, :2] - pred[:, 2:] / 2, target[:, :2] - target[:, 2:] / 2) c_br = torch.max(pred[:, :2] + pred[:, 2:] / 2, target[:, :2] + target[:, 2:] / 2) area_c = torch.prod(c_br - c_tl, 1) giou = iou - (area_c - area_u) / area_c.clamp(1e-16) loss = 1 - giou.clamp(min=-1.0, max=1.0) if self.reduction == 'mean': loss = loss.mean() elif self.reduction == 'sum': loss = loss.sum() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp6 * tmp2 tmp8 = tmp5 + tmp7 tmp9 = triton_helpers.minimum(tmp4, tmp8) tmp10 = tmp0 - tmp3 tmp11 = tmp5 - tmp7 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp9 - tmp12 tmp16 = tmp15 * tmp2 tmp17 = tmp14 + tmp16 tmp20 = tmp19 * tmp2 tmp21 = tmp18 + tmp20 tmp22 = triton_helpers.minimum(tmp17, tmp21) tmp23 = tmp14 - tmp16 tmp24 = tmp18 - tmp20 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp26 = tmp22 - tmp25 tmp27 = tmp13 * tmp26 tmp28 = tmp12 < tmp9 tmp29 = tmp28.to(tl.float32) tmp30 = tmp25 < tmp22 tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 * tmp31 tmp33 = tmp27 * tmp32 tmp34 = tmp1 * tmp15 tmp35 = tmp6 * tmp19 tmp36 = tmp34 + tmp35 tmp37 = tmp36 - tmp33 tmp38 = 1e-16 tmp39 = tmp37 + tmp38 tmp40 = tmp33 / tmp39 tmp41 = tmp40 * tmp40 tmp42 = 1.0 tmp43 = tmp42 - tmp41 tl.store(in_out_ptr0 + x0, tmp43, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64,), (1,), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0[ grid(64)](buf1, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class IOUlossNew(nn.Module): def __init__(self, reduction='none', loss_type='iou'): super(IOUlossNew, self).__init__() self.reduction = reduction self.loss_type = loss_type def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ankandrew/YOLOX
IOUloss
false
12087
[ "Apache-2.0" ]
0
28da975944887d550f052ebadd8cbdd82d14aed6
https://github.com/ankandrew/YOLOX/tree/28da975944887d550f052ebadd8cbdd82d14aed6
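A minimal sanity check for the entry above, assuming its IOUloss, IOUlossNew, and get_inputs definitions are in scope and a CUDA device is available: the single fused Triton kernel should reproduce the eager 1 - iou**2 loss for the 'iou' loss type.

import torch

if torch.cuda.is_available():
    pred, target = [t.cuda() for t in get_inputs()]
    eager = IOUloss(reduction='none', loss_type='iou')
    fused = IOUlossNew(reduction='none', loss_type='iou')
    ref = eager(pred, target)   # eager path: 1 - iou**2 per box, shape (64,)
    out = fused(pred, target)   # same computation in one fused Triton kernel
    assert torch.allclose(ref, out, atol=1e-6)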
Correct
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/nw/cnwgisju4h5iwbbibpm7ry7jyqdrctyoxqysjbzmmiwisfsn62pt.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => max_1 # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, 1), kwargs = {}) triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 
> tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + (x2), tmp46, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pk/cpkkxgnrsyqy34nnskxhxcnh47sec36ptyhoqi5lyov2gukvlu4b.py # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] # Source node to ATen node mapping: # eq => eq # Graph fragment: # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%getitem_1, %arg1_1), kwargs = {}) triton_poi_fused_eq_1 = async_compile.triton('triton_poi_fused_eq_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (x2), xmask) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 == tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] triton_poi_fused_eq_1.run(buf0, arg1_1, buf1, 256, grid=grid(256), stream=stream0) del arg1_1 del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.utils.data.distributed class Correct(nn.Module): def forward(self, classifier, target): return classifier.max(dim=1)[1] == target def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x2, tmp46, xmask) @triton.jit def triton_poi_fused_eq_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 == tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_eq_1[grid(256)](buf0, arg1_1, buf1, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg1_1 del buf0 return buf1, class CorrectNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
amitport/grace
Correct
false
12088
[ "BSD-2-Clause" ]
0
b0e442057d2f36f09cd1817a4acb966c6b0b780f
https://github.com/amitport/grace/tree/b0e442057d2f36f09cd1817a4acb966c6b0b780f
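A similar smoke test for the Correct entry, assuming its Correct and CorrectNew classes are in scope: the two fused kernels reproduce classifier.max(dim=1)[1] == target, with the (4, 4, 4) index tensor broadcast against the (4, 4, 4, 4) target.

import torch

if torch.cuda.is_available():
    classifier = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    ref = Correct()(classifier, target)     # eager argmax-and-compare
    out = CorrectNew()(classifier, target)  # fused max + eq kernels
    assert torch.equal(ref, out)            # both are bool (4, 4, 4, 4)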
Actor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ix/cixxyusyg44s2hkoufcgbrv3ix5ookwqjl4ia3xkv7bdqi4yrzus.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = 
xindex % 400 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/op/coptu6xep3awc4lajb4xivopppqmjtx3zy7ebtazm45rqvyeknds.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = (xindex // 1200) x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask) tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/as/casrc7bf7ghsendgi7tkqxk3hj4ic6aqb4rmkxzuk5dhbidznia7.py # Topologically Sorted Source Nodes: [x_1, linear_2], Original ATen: [aten.relu, aten.view] # Source node to ATen node mapping: # linear_2 => view_4 # x_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %view_4 : [num_users=2] = 
call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {}) triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = (xindex // 300) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ys/cys253tikbbdgdanmkbmksr3535u6u5hakjjivpuhm3tm6w335xs.py # Topologically Sorted Source Nodes: [tanh, x_2], Original ATen: [aten.tanh, aten.mul] # Source node to ATen node mapping: # tanh => tanh # x_2 => mul # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 4), kwargs = {}) triton_poi_fused_mul_tanh_3 = async_compile.triton('triton_poi_fused_mul_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_tanh_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 400), (400, 1)) assert_size_stride(primals_5, (300, ), (1, )) assert_size_stride(primals_6, (4, 300), (300, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 25600, grid=grid(25600), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf7, 19200, grid=grid(19200), stream=stream0) del primals_5 buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_1, linear_2], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_2.run(buf3, buf4, 19200, grid=grid(19200), stream=stream0) del buf3 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (300, 4), (1, 300), 0), alpha=1, beta=1, out=buf5) del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, x_2], Original ATen: [aten.tanh, aten.mul] triton_poi_fused_mul_tanh_3.run(buf5, buf6, 256, grid=grid(256), stream=stream0) return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4, buf5, primals_6, buf7, primals_4, buf8, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Actor(nn.Module): def __init__(self, state_dim, action_dim, max_action): super(Actor, self).__init__() self.l1 = nn.Linear(state_dim, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, action_dim) self.max_action = max_action def forward(self, x): x = F.relu(self.l1(x)) x = F.relu(self.l2(x)) x = self.max_action * torch.tanh(self.l3(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = xindex // 1200 x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask) tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = xindex // 300 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_mul_tanh_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 400), (400, 1)) assert_size_stride(primals_5, (300,), (1,)) assert_size_stride(primals_6, (4, 300), (300, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1, primals_2, buf8, 25600, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2, primals_5, buf3, buf7, 19200, XBLOCK=128, num_warps=4, num_stages=1 ) del primals_5 buf4 = buf2 del buf2 triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK =256, num_warps=4, num_stages=1) del buf3 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (300, 4), (1, 300), 0), alpha=1, beta=1, out=buf5) del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_tanh_3[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 400), (400, 1), 0 ), buf4, buf5, primals_6, buf7, primals_4, buf8 class ActorNew(nn.Module): def __init__(self, state_dim, action_dim, max_action): super(ActorNew, self).__init__() self.l1 = nn.Linear(state_dim, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, action_dim) self.max_action = max_action def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_6 = self.l3.weight primals_7 = self.l3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
SheepiesLab/plato
Actor
false
12089
[ "Apache-2.0" ]
0
9f5bbfa4b6952d1b3af24be409982d303d54a169
https://github.com/SheepiesLab/plato/tree/9f5bbfa4b6952d1b3af24be409982d303d54a169
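For the Actor entry, a short forward-pass example, assuming ActorNew is in scope and a CUDA device is present. Note that triton_poi_fused_mul_tanh_3 bakes the scale 4.0 in as a constant (tmp2 = 4.0), so this compiled module is only valid for max_action = 4.

import torch

if torch.cuda.is_available():
    actor = ActorNew(state_dim=4, action_dim=4, max_action=4).cuda()
    state = torch.rand(4, 4, 4, 4, device='cuda')
    action = actor(state)
    assert action.shape == (4, 4, 4, 4)
    # tanh output scaled by max_action stays inside [-4, 4]
    assert action.abs().max().item() <= 4.0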
QNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat] # Source node to ATen node mapping: # xu => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x1 => relu # Graph fragment: # %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, ), (1, )) assert_size_stride(primals_9, (4, 8), (8, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, 
), (1, )) assert_size_stride(primals_13, (1, 4), (4, 1)) assert_size_stride(primals_14, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf4, primals_6, 16, grid=grid(16), stream=stream0) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x1_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 4), (1, 8), 0), out=buf7) del primals_9 buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf8, primals_10, 16, grid=grid(16), stream=stream0) del primals_10 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [x2_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf10, primals_12, 16, grid=grid(16), stream=stream0) del primals_12 buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x2_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(primals_13, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_14 return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13, primals_11, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_10 = 
rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F def weights_init_(m): if isinstance(m, nn.Linear): torch.nn.init.xavier_uniform_(m.weight, gain=1) torch.nn.init.constant_(m.bias, 0) class QNetwork(nn.Module): def __init__(self, num_inputs, num_actions, hidden_dim): super(QNetwork, self).__init__() self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim) self.linear2 = nn.Linear(hidden_dim, hidden_dim) self.linear3 = nn.Linear(hidden_dim, 1) self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim) self.linear5 = nn.Linear(hidden_dim, hidden_dim) self.linear6 = nn.Linear(hidden_dim, 1) self.apply(weights_init_) def forward(self, state, action): xu = torch.cat([state, action], 1) x1 = F.relu(self.linear1(xu)) x1 = F.relu(self.linear2(x1)) x1 = self.linear3(x1) x2 = F.relu(self.linear4(xu)) x2 = F.relu(self.linear5(x2)) x2 = self.linear6(x2) return x1, x2 def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1,), (1,)) assert_size_stride(primals_9, (4, 8), (8, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (1, 4), (4, 1)) assert_size_stride(primals_14, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4 ), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, 
reinterpret_tensor(primals_9, (8, 4), (1, 8 ), 0), out=buf7) del primals_9 buf8 = buf7 del buf7 triton_poi_fused_relu_1[grid(16)](buf8, primals_10, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_10 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_relu_1[grid(16)](buf10, primals_12, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_12 buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_14, buf10, reinterpret_tensor( primals_13, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_14 return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13, primals_11, primals_7, primals_5) def weights_init_(m): if isinstance(m, nn.Linear): torch.nn.init.xavier_uniform_(m.weight, gain=1) torch.nn.init.constant_(m.bias, 0) class QNetworkNew(nn.Module): def __init__(self, num_inputs, num_actions, hidden_dim): super(QNetworkNew, self).__init__() self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim) self.linear2 = nn.Linear(hidden_dim, hidden_dim) self.linear3 = nn.Linear(hidden_dim, 1) self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim) self.linear5 = nn.Linear(hidden_dim, hidden_dim) self.linear6 = nn.Linear(hidden_dim, 1) self.apply(weights_init_) def forward(self, input_0, input_1): primals_3 = self.linear1.weight primals_4 = self.linear1.bias primals_1 = self.linear2.weight primals_6 = self.linear2.bias primals_7 = self.linear3.weight primals_8 = self.linear3.bias primals_9 = self.linear4.weight primals_10 = self.linear4.bias primals_2 = self.linear5.weight primals_12 = self.linear5.bias primals_13 = self.linear6.weight primals_14 = self.linear6.bias primals_5 = input_0 primals_11 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0], output[1]
SheepiesLab/plato
QNetwork
false
12090
[ "Apache-2.0" ]
0
9f5bbfa4b6952d1b3af24be409982d303d54a169
https://github.com/SheepiesLab/plato/tree/9f5bbfa4b6952d1b3af24be409982d303d54a169
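A shape-only smoke test for the QNetwork entry, assuming QNetworkNew is in scope. Shapes rather than values are checked because the reconstructed forward routes input_0 into primals_5, which the compiled call consumes as the second-layer weight matrix, so numerical parity with the eager QNetwork is not guaranteed here; with hidden_dim = 4 all the (4, 4) tensors are interchangeable by shape and the wrapper still runs.

import torch

if torch.cuda.is_available():
    q = QNetworkNew(num_inputs=4, num_actions=4, hidden_dim=4).cuda()
    state = torch.rand(4, 4, device='cuda')
    action = torch.rand(4, 4, device='cuda')
    q1, q2 = q(state, action)  # twin critic heads, one Q-value per pair
    assert q1.shape == (4, 1) and q2.shape == (4, 1)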
UniverseHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/lj/cljsrijowtnbzjiaduzofcdoinvzdk5u5wmgoeazwij2pzm2kbk4.py # Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d => convolution # output => expm1, gt, mul, mul_2, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': 
True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 73728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 576) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/kf/ckfommqp6td2htzxbo7aalzevwjdpuh4own6yrbvx5ptwuz4n56b.py # Topologically Sorted Source Nodes: [conv2d_1, output_1], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # output_1 => expm1_1, gt_1, mul_3, mul_5, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.0), kwargs = {}) # %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {}) triton_poi_fused_convolution_elu_1 = async_compile.triton('triton_poi_fused_convolution_elu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 144) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/bt/cbt77j7twfbpu3wpdzmljiocadksb5szmmxygm3o2ha3spz4lwww.py # Topologically Sorted Source Nodes: [conv2d_2, output_2], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # output_2 => expm1_2, gt_2, mul_6, mul_8, where_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 1.0), kwargs = {}) # %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_6,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_6, %mul_8), kwargs = {}) triton_poi_fused_convolution_elu_2 = async_compile.triton('triton_poi_fused_convolution_elu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex 
< xnumel x3 = xindex x1 = (xindex // 36) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zg/czgxjv4bk2urqrxzxloc27znxlr5uvv7ynbnb7frt3vqviqwdwlz.py # Topologically Sorted Source Nodes: [conv2d_3, output_3], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # output_3 => expm1_3, gt_3, mul_11, mul_9, where_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 1.0), kwargs = {}) # %expm1_3 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_9,), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_3, 1.0), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %mul_9, %mul_11), kwargs = {}) triton_poi_fused_convolution_elu_3 = async_compile.triton('triton_poi_fused_convolution_elu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 9) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 
tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 48, 48), (9216, 2304, 48, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 24, 24), (18432, 576, 24, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 32, 24, 24), (18432, 576, 24, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_elu_0.run(buf1, primals_2, buf2, 73728, grid=grid(73728), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 12, 12), (4608, 144, 12, 1)) buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 32, 12, 12), (4608, 144, 12, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, output_1], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_1.run(buf4, primals_5, buf5, 18432, grid=grid(18432), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 6, 6), (1152, 36, 6, 1)) buf7 = buf6; del buf6 # reuse buf8 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_2, output_2], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_2.run(buf7, primals_7, buf8, 4608, grid=grid(4608), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 32, 3, 3), (288, 9, 3, 1)) buf10 = buf9; del buf9 # reuse buf11 = empty_strided_cuda((4, 32, 3, 3), (288, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_3, output_3], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_3.run(buf10, primals_9, buf11, 1152, grid=grid(1152), stream=stream0) del primals_9 return (reinterpret_tensor(buf11, (4, 288), (288, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf4, buf5, 
buf7, buf8, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 48, 48), (9216, 2304, 48, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class UniverseHead(torch.nn.Module):
    """
    universe agent example
    input: [None, 42, 42, 1]; output: [None, 288];
    """

    def __init__(self, n):
        super(UniverseHead, self).__init__()
        self.conv1 = nn.Conv2d(n, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.conv3 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.conv4 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.output_size = 288

    def forward(self, state):
        output = F.elu(self.conv1(state))
        output = F.elu(self.conv2(output))
        output = F.elu(self.conv3(output))
        output = F.elu(self.conv4(output))
        return output.view(-1, self.output_size)


def get_inputs():
    return [torch.rand([4, 4, 48, 48])]


def get_init_inputs():
    return [[], {'n': 4}]
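A quick sanity check (an editorial sketch, not part of the dataset record): with the 48x48 test input from get_inputs(), four kernel-3, stride-2, padding-1 convolutions halve the spatial size each time, which is where self.output_size = 288 comes from and why call() can reinterpret the final (4, 32, 3, 3) buffer as (4, 288).

def conv_out(size, kernel=3, stride=2, padding=1):
    # Standard Conv2d output-size formula: floor((size + 2*padding - kernel) / stride) + 1
    return (size + 2 * padding - kernel) // stride + 1

size = 48
for _ in range(4):           # conv1 .. conv4
    size = conv_out(size)    # 48 -> 24 -> 12 -> 6 -> 3
assert 32 * size * size == 288  # 32 channels * 3 * 3 spatial = output_size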
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 576 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp9, None) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 144 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp9, None) @triton.jit def triton_poi_fused_convolution_elu_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 36 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_elu_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 9 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 48, 48), (9216, 2304, 48, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32,), 
(1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 24, 24), (18432, 576, 24, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 32, 24, 24), (18432, 576, 24, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(73728)](buf1, primals_2, buf2, 73728, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 12, 12), (4608, 144, 12, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 32, 12, 12), (4608, 144, 12, 1), torch.float32) triton_poi_fused_convolution_elu_1[grid(18432)](buf4, primals_5, buf5, 18432, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 6, 6), (1152, 36, 6, 1)) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch. float32) triton_poi_fused_convolution_elu_2[grid(4608)](buf7, primals_7, buf8, 4608, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf9 = extern_kernels.convolution(buf8, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 32, 3, 3), (288, 9, 3, 1)) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((4, 32, 3, 3), (288, 9, 3, 1), torch.float32 ) triton_poi_fused_convolution_elu_3[grid(1152)](buf10, primals_9, buf11, 1152, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 return (reinterpret_tensor(buf11, (4, 288), (288, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf4, buf5, buf7, buf8, buf10) class UniverseHeadNew(torch.nn.Module): """ universe agent example input: [None, 42, 42, 1]; output: [None, 288]; """ def __init__(self, n): super(UniverseHeadNew, self).__init__() self.conv1 = nn.Conv2d(n, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) self.conv3 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) self.conv4 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) self.output_size = 288 def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
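All four fused kernels above compute the same elementwise ELU after the bias add: pass the value through when it is positive, otherwise take expm1 of it (the 1.0 factors in the graph come from ELU's default alpha and scale of 1.0). A minimal eager-mode sketch of that branch, assuming only stock PyTorch:

import torch
import torch.nn.functional as F

def elu_like_kernel(x, alpha=1.0):
    # Mirrors tmp4..tmp9 in the kernels: the positive branch passes x through,
    # the negative branch uses expm1, i.e. exp(x) - 1 computed stably for small x.
    return torch.where(x > 0, x, alpha * torch.expm1(x))

x = torch.randn(1024)
assert torch.allclose(elu_like_kernel(x), F.elu(x), atol=1e-6)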
andy920262/pytorch-a2c-ppo-acktr
UniverseHead
false
12,091
[ "MIT" ]
0
2e7e85219dfe737cb4036de3cf0c8b00706d640e
https://github.com/andy920262/pytorch-a2c-ppo-acktr/tree/2e7e85219dfe737cb4036de3cf0c8b00706d640e
DQN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3s/c3swieudbq6hlerbvzxffejbssgds5rhgg5xk7zaijlgerjj4eqy.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # out_1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/oa/coaokmevnfspxti5lsga2nucnhcbvids7fnbi3a75qm6efjnqqnt.py # Topologically Sorted Source Nodes: [out_3, view], Original ATen: [aten.relu, aten.view, aten.threshold_backward] # Source node to ATen node mapping: # out_3 => relu_1 # view => view # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [4, -1]), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_view_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_view_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (50, 4), (4, 1)) assert_size_stride(primals_2, (50, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (50, 50), (50, 1)) assert_size_stride(primals_5, (50, ), (1, )) assert_size_stride(primals_6, (4, 
50), (50, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 50), (50, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_2, 200, grid=grid(200), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 50), (50, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (50, 50), (1, 50), 0), out=buf2) buf3 = empty_strided_cuda((4, 50), (50, 1), torch.float32) buf5 = empty_strided_cuda((4, 50), (50, 1), torch.bool) # Topologically Sorted Source Nodes: [out_3, view], Original ATen: [aten.relu, aten.view, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_view_1.run(buf2, primals_5, buf3, buf5, 200, grid=grid(200), stream=stream0) del buf2 del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (50, 4), (1, 50), 0), alpha=1, beta=1, out=buf4) del primals_7 return (buf4, primals_3, buf1, buf3, primals_6, buf5, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((50, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((50, 50), (50, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 50), (50, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class DQN(nn.Module):

    def __init__(self, state_dim, nb_actions, hidden1=50, hidden2=50):
        super(DQN, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden1)
        self.fc2 = nn.Linear(hidden1, hidden2)
        self.fc3 = nn.Linear(hidden2, nb_actions)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        return self.fc3(out.view(out.size(0), -1))


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'nb_actions': 4}]
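For reference (an illustrative decomposition, grounded in the call() function of this record): each fc + ReLU pair is lowered into an extern matmul against the reinterpreted (transposed) weight, followed by a fused bias-add + ReLU, which is what triton_poi_fused_relu_0 applies elementwise in place on buf1.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4)     # get_inputs() shape
w = torch.rand(50, 4)    # fc1.weight shape
b = torch.rand(50)       # fc1.bias shape
# extern mm against the transposed weight, then the fused bias + ReLU:
fused = torch.clamp_min(x @ w.t() + b, 0.0)
assert torch.allclose(fused, F.relu(F.linear(x, w, b)))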
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_view_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (50, 4), (4, 1)) assert_size_stride(primals_2, (50,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (50, 50), (50, 1)) assert_size_stride(primals_5, (50,), (1,)) assert_size_stride(primals_6, (4, 50), (50, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 50), (50, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(200)](buf1, primals_2, 200, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 50), (50, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (50, 50), (1, 50), 0), out=buf2) buf3 = empty_strided_cuda((4, 50), (50, 1), torch.float32) buf5 = empty_strided_cuda((4, 50), (50, 1), torch.bool) triton_poi_fused_relu_threshold_backward_view_1[grid(200)](buf2, primals_5, buf3, buf5, 200, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (50, 4), (1, 50), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, primals_3, buf1, buf3, primals_6, buf5, primals_4 class DQNNew(nn.Module): def __init__(self, state_dim, nb_actions, hidden1=50, hidden2=50): super(DQNNew, self).__init__() self.fc1 = nn.Linear(state_dim, hidden1) self.fc2 = nn.Linear(hidden1, hidden2) self.fc3 = nn.Linear(hidden2, nb_actions) self.relu = nn.ReLU() self.tanh = nn.Tanh() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
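One detail worth calling out (editorial note): the second kernel additionally stores the boolean mask out <= 0 into buf5, which call() returns alongside the activations. That mask is the input aten.threshold_backward expects, so the backward pass can zero ReLU gradients without keeping the pre-activation values around. A small sketch of what the mask does:

import torch

pre = torch.randn(4, 50)
out = torch.relu(pre)
mask = out <= 0                            # what buf5 holds, as torch.bool
grad_out = torch.ones_like(out)
grad_in = grad_out.masked_fill(mask, 0.0)  # threshold_backward's effect
assert torch.equal(grad_in, (pre > 0).float())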
alvinwan/explore
DQN
false
12,092
[ "MIT" ]
0
358c076b8250f561394e32b1ee2de9bc5562dcdb
https://github.com/alvinwan/explore/tree/358c076b8250f561394e32b1ee2de9bc5562dcdb
Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ac/cac3fsg2etmtbrojzasevhhi6adzjv3vdon2r6addczf27vwsi2g.py # Topologically Sorted Source Nodes: [x_2, out], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out => relu_1 # x_2 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), 
(64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2, out], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf4, 256, grid=grid(256), stream=stream0) del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class Block(torch.nn.Module):

    def __init__(self, in_channels, mid_channel, out_channels, batch_norm=False):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=mid_channel, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(in_channels=mid_channel, out_channels=out_channels, kernel_size=3, padding=1)
        self.batch_norm = batch_norm
        if batch_norm:
            self.bn1 = torch.nn.BatchNorm2d(mid_channel)
            self.bn2 = torch.nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = self.conv1(x)
        if self.batch_norm:
            x = self.bn1(x)
        x = torch.nn.ReLU(inplace=True)(x)
        x = self.conv2(x)
        if self.batch_norm:
            x = self.bn2(x)
        out = torch.nn.ReLU(inplace=True)(x)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'mid_channel': 4, 'out_channels': 4}]
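As a quick check (editorial sketch): with kernel 3, stride 1, padding 1, both convolutions preserve spatial size, since floor((4 + 2 - 3) / 1) + 1 = 4, matching the (4, 4, 4, 4) shapes asserted on buf0 and buf2 in call().

import torch

block = Block(in_channels=4, mid_channel=4, out_channels=4)
y = block(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 4, 4)  # same spatial size in and out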
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(256)](buf3, primals_5, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1, buf4 class BlockNew(torch.nn.Module): def __init__(self, in_channels, mid_channel, out_channels, batch_norm=False ): super().__init__() self.conv1 = torch.nn.Conv2d(in_channels=in_channels, out_channels= mid_channel, kernel_size=3, padding=1) self.conv2 = torch.nn.Conv2d(in_channels=mid_channel, out_channels= out_channels, kernel_size=3, padding=1) self.batch_norm = batch_norm if batch_norm: self.bn1 = torch.nn.BatchNorm2d(mid_channel) self.bn2 = torch.nn.BatchNorm2d(out_channels) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
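The `buf1 = buf0; del buf0` pattern plus the in_out_ptr0 argument above means the convolution output buffer is rewritten in place with bias + ReLU instead of allocating a fresh tensor. An eager-mode analogue of that reuse (a sketch for exposition, assuming stock PyTorch only):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
w = torch.rand(4, 4, 3, 3)
b = torch.rand(4)
buf = F.conv2d(x, w, bias=None, padding=1)   # extern convolution, no bias
buf.add_(b.view(1, -1, 1, 1)).relu_()        # fused bias + ReLU, in place
assert torch.allclose(buf, F.relu(F.conv2d(x, w, bias=b, padding=1)))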
amrane99/lung-segmentation
Block
false
12,093
[ "MIT" ]
0
ab29db75ac78918da5cbf66b830acaf36cf7b44a
https://github.com/amrane99/lung-segmentation/tree/ab29db75ac78918da5cbf66b830acaf36cf7b44a
ConvNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/v6/cv6mztg47i3yp2mxnxk4ai26mhqlyntfg7qgsv3xjf7yzaq3fxru.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zb/czbnudz35jsg6mj3y43nz7tlz5okjv3pmg33uguvkcha2deejqco.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 900) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xx/cxxg4h3myv3a2bchnjad2cgzdh43zl764tbnb2zicliquavr3pel.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = 
call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 86528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 676) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/sk/cskrks25gn6uuy6errnjcoehapj4oljainfzp266kkfs7xgfpix4.py # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_3 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 169) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/fd/cfd6gu353a4kyrwhpht7rz47jbhddrdy5ltjm4bjnj3fvf4uoafl.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_per_fused__log_softmax_4 = async_compile.triton('triton_per_fused__log_softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_4', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_4(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel): xnumel = 4 XBLOCK: tl.constexpr = 1 rnumel = 1000 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1000*x0)), rmask, other=0.0) tmp1 = tl.load(in_ptr1 + ((r1 // 100)), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.where(rmask, tmp3, float("-inf")) tmp6 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp5, 0)) tmp7 = tmp2 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = tl.where(rmask, tmp9, 0) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp13 = tl_math.log(tmp12) tmp14 = tmp7 - tmp13 tl.store(out_ptr2 + (r1 + (1000*x0)), tmp14, rmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (8, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (32, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (10, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_11, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 60, 60), (28800, 3600, 60, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 115200, grid=grid(115200), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 30, 30), (14400, 900, 30, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 57600, grid=grid(57600), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 26, 26), (21632, 676, 26, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted 
Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 86528, grid=grid(86528), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 13, 13), (10816, 169, 13, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf7, primals_9, 43264, grid=grid(43264), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 10, 10, 10), (1000, 100, 10, 1)) buf11 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_per_fused__log_softmax_4.run(buf8, primals_11, buf11, 4, 1000, grid=grid(4), stream=stream0) del buf8 del primals_11 return (buf11, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 16, 5, 5), (400, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((10, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
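The fused `triton_per_fused__log_softmax_4` kernel above folds the conv5 bias add into a numerically stable log-softmax: each program reduces one sample's 1000 flattened logits, broadcasting the 10 channel biases through `r1 // 100`. A minimal eager-mode sketch of the same computation (the `bias_log_softmax` helper is a hypothetical name, written only to mirror the kernel):

import torch

def bias_log_softmax(conv_out: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    # conv_out: (N, 10, 10, 10) pre-bias output of conv5; bias: (10,)
    x = conv_out + bias.view(1, -1, 1, 1)        # tmp2 = tmp0 + tmp1; channel picked by r1 // 100
    x = x.reshape(x.shape[0], -1)                # flatten to (N, 1000)
    x = x - x.max(dim=1, keepdim=True).values    # tmp7 = tmp2 - max, for numerical stability
    return x - x.exp().sum(dim=1, keepdim=True).log()  # tmp14 = tmp7 - log(sum(exp(tmp7)))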
import torch
import torch.nn as nn
import torch.nn.functional as F


class ConvNet(nn.Module):
    """ convolutional neural network """

    def __init__(self):
        super(ConvNet, self).__init__()
        nf = 8
        self.conv1 = nn.Conv2d(1, nf * 1, 5, 1, 0)
        self.conv2 = nn.Conv2d(nf * 1, nf * 2, 4, 2, 1)
        self.conv3 = nn.Conv2d(nf * 2, nf * 4, 5, 1, 0)
        self.conv4 = nn.Conv2d(nf * 4, nf * 8, 4, 2, 1)
        self.conv5 = nn.Conv2d(nf * 8, 10, 4, 1, 0)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = self.conv5(x)
        x = torch.flatten(x, 1)
        return F.log_softmax(x, dim=1)


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
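The intermediate buffer shapes asserted in `call` above follow from the usual conv arithmetic, floor((H + 2p - k) / s) + 1; a quick sketch reproducing them from the layer hyperparameters:

def conv_out(h, k, s, p):
    return (h + 2 * p - k) // s + 1

h = 64
for k, s, p in [(5, 1, 0), (4, 2, 1), (5, 1, 0), (4, 2, 1), (4, 1, 0)]:
    h = conv_out(h, k, s, p)
    print(h)  # 60, 30, 26, 13, 10, matching buf0..buf8 in call()
# final feature map: 10 channels * 10 * 10 = 1000 logits per sample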
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 900 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 86528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 676 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 169 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_4(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel ): XBLOCK: tl.constexpr = 1 rnumel = 1000 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1000 * x0), rmask, other=0.0) tmp1 = tl.load(in_ptr1 + r1 // 100, rmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.where(rmask, tmp3, float('-inf')) tmp6 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp5, 0)) tmp7 = tmp2 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = tl.where(rmask, tmp9, 0) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp13 = tl_math.log(tmp12) tmp14 = tmp7 - tmp13 tl.store(out_ptr2 + (r1 + 1000 * x0), tmp14, rmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, 
primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (8, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (32, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (10, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 60, 60), (28800, 3600, 60, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(115200)](buf1, primals_2, 115200, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 30, 30), (14400, 900, 30, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(57600)](buf3, primals_5, 57600, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 26, 26), (21632, 676, 26, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(86528)](buf5, primals_7, 86528, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 13, 13), (10816, 169, 13, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_3[grid(43264)](buf7, primals_9, 43264, XBLOCK=512, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 10, 10, 10), (1000, 100, 10, 1)) buf11 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32) triton_per_fused__log_softmax_4[grid(4)](buf8, primals_11, buf11, 4, 1000, num_warps=8, num_stages=1) del buf8 del primals_11 return (buf11, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf11) class ConvNetNew(nn.Module): """ convolutional neural network """ def __init__(self): super(ConvNetNew, self).__init__() nf = 8 self.conv1 = nn.Conv2d(1, nf * 1, 5, 1, 0) self.conv2 = nn.Conv2d(nf * 1, nf * 2, 4, 2, 1) self.conv3 = nn.Conv2d(nf * 2, nf * 4, 5, 1, 0) self.conv4 = nn.Conv2d(nf * 4, nf * 8, 4, 2, 1) self.conv5 = nn.Conv2d(nf * 8, 10, 4, 1, 0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.conv5.weight primals_11 = self.conv5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, 
primals_9, primals_10, primals_11]) return output[0]
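To sanity-check the compiled wrapper against the eager module, one can copy the parameters across and compare outputs; a sketch assuming a CUDA device and the two classes defined in this record:

import torch

eager = ConvNet().cuda().eval()
compiled = ConvNetNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical weights and biases

x = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), compiled(x), atol=1e-5))  # expected: True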
animeshbchowdhury/robust-pnr-time
ConvNet
false
12094
[ "BSD-3-Clause" ]
0
301c5d973b8c024a85fdab915986ecf257e7698b
https://github.com/animeshbchowdhury/robust-pnr-time/tree/301c5d973b8c024a85fdab915986ecf257e7698b
NatureHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/sn/csnsms5tdtjok5uxcwcbko2ioqfann3pwnmkfhlujgvnsujd5bud.py # Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # output => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 156800 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 1225) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/f4/cf4q74veoggsxdgdkl43ap6cyqfylpfk3qs7wdqoebyfzzb36dvw.py # Topologically Sorted Source Nodes: [conv2d_1, output_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # output_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/li/cliu5bmn72sneydat6uytfwmj6s6i5vhkwhge4crynwidoxqn4im.py # Topologically Sorted Source Nodes: [conv2d_2, output_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # output_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), 
kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 196) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/w5/cw56suctqqm6gjmfvwdswm7oehumkd4sm6fqaujit3p5ts6ettdi.py # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # output_3 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 144, 144), (82944, 20736, 144, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (512, 1568), (1568, 1)) assert_size_stride(primals_9, (512, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 35, 35), (39200, 1225, 35, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 156800, grid=grid(156800), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 16, 16), (16384, 256, 16, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, output_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 65536, grid=grid(65536), stream=stream0) del primals_5 # 
Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 14, 14), (6272, 196, 14, 1)) buf5 = buf4; del buf4 # reuse buf9 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_2, output_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf5, primals_7, buf9, 25088, grid=grid(25088), stream=stream0) del primals_7 buf6 = empty_strided_cuda((16, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (16, 1568), (1568, 1), 0), reinterpret_tensor(primals_8, (1568, 512), (1, 1568), 0), out=buf6) buf7 = buf6; del buf6 # reuse buf8 = empty_strided_cuda((16, 512), (512, 1), torch.bool) # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf7, primals_9, buf8, 8192, grid=grid(8192), stream=stream0) del primals_9 return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf3, reinterpret_tensor(buf5, (16, 1568), (1568, 1), 0), buf8, primals_8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4, 8, 8), (256, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 144, 144), (82944, 20736, 144, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((512, 1568), (1568, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
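`triton_poi_fused_convolution_relu_threshold_backward_2` above produces two outputs: the biased ReLU activation, written in place over the conv result, and a boolean mask of the zeroed positions (buf9) that `aten.threshold_backward` later uses to gate gradients. In eager terms (a sketch; `relu_with_backward_mask` is a hypothetical name):

import torch

def relu_with_backward_mask(conv_out: torch.Tensor, bias: torch.Tensor):
    # conv_out: (4, 32, 14, 14); bias: (32,), broadcast per channel (x1 = idx // 196 % 32)
    act = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    mask = act <= 0  # stored to buf9 for the backward pass
    return act, mask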
import torch
import torch.nn as nn
import torch.nn.functional as F


class NatureHead(torch.nn.Module):
    """ DQN Nature 2015 paper
    input: [None, 84, 84, 4]; output: [None, 3136] -> [None, 512];
    """

    def __init__(self, n):
        super(NatureHead, self).__init__()
        self.conv1 = nn.Conv2d(n, 32, kernel_size=(8, 8), stride=(4, 4))
        self.conv2 = nn.Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2))
        self.conv3 = nn.Conv2d(64, 32, kernel_size=(3, 3), stride=(1, 1))
        self.dense = nn.Linear(32 * 7 * 7, 512)
        self.output_size = 512

    def forward(self, state):
        output = F.relu(self.conv1(state))
        output = F.relu(self.conv2(output))
        output = F.relu(self.conv3(output))
        output = F.relu(self.dense(output.view(-1, 32 * 7 * 7)))
        return output


def get_inputs():
    return [torch.rand([4, 4, 144, 144])]


def get_init_inputs():
    return [[], {'n': 4}]
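Note the shape arithmetic with the 144x144 test inputs: conv1 yields 35x35, conv2 16x16, conv3 14x14, so each sample carries 32 * 14 * 14 = 6272 features and `view(-1, 32 * 7 * 7)` reshapes the (4, 32, 14, 14) tensor to 16 rows of 1568. That is why the compiled `call` runs `mm` on a (16, 1568) input and returns a (16, 512) tensor. (The docstring's 3136 corresponds to the original Nature architecture's 64 * 7 * 7 final conv; this variant's conv3 emits 32 channels, giving 1568.) As a sketch:

def conv_out(h, k, s):
    return (h - k) // s + 1

h = conv_out(144, 8, 4)        # 35
h = conv_out(h, 4, 2)          # 16
h = conv_out(h, 3, 1)          # 14
per_sample = 32 * h * h        # 6272
print(4 * per_sample // (32 * 7 * 7))  # 16 rows after view(-1, 1568)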
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 156800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 1225 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 196 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 144, 144), (82944, 20736, 144, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (512, 1568), (1568, 1)) assert_size_stride(primals_9, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 35, 35), 
(39200, 1225, 35, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(156800)](buf1, primals_2, 156800, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 16, 16), (16384, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(65536)](buf3, primals_5, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 14, 14), (6272, 196, 14, 1)) buf5 = buf4 del buf4 buf9 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(25088)]( buf5, primals_7, buf9, 25088, XBLOCK=128, num_warps=4, num_stages=1 ) del primals_7 buf6 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (16, 1568), (1568, 1), 0 ), reinterpret_tensor(primals_8, (1568, 512), (1, 1568), 0), out=buf6) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((16, 512), (512, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(8192)](buf7, primals_9, buf8, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf3, reinterpret_tensor(buf5, (16, 1568), (1568, 1), 0), buf8, primals_8, buf9) class NatureHeadNew(torch.nn.Module): """ DQN Nature 2015 paper input: [None, 84, 84, 4]; output: [None, 3136] -> [None, 512]; """ def __init__(self, n): super(NatureHeadNew, self).__init__() self.conv1 = nn.Conv2d(n, 32, kernel_size=(8, 8), stride=(4, 4)) self.conv2 = nn.Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2)) self.conv3 = nn.Conv2d(64, 32, kernel_size=(3, 3), stride=(1, 1)) self.dense = nn.Linear(32 * 7 * 7, 512) self.output_size = 512 def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.dense.weight primals_9 = self.dense.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
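A brief usage sketch for the compiled head (assumes a CUDA device; not part of the source):

import torch

head = NatureHeadNew(n=4).cuda()
state = torch.rand(4, 4, 144, 144, device='cuda')
print(head(state).shape)  # torch.Size([16, 512]); note the reshaped batch dimension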
andy920262/pytorch-a2c-ppo-acktr
NatureHead
false
12095
[ "MIT" ]
0
2e7e85219dfe737cb4036de3cf0c8b00706d640e
https://github.com/andy920262/pytorch-a2c-ppo-acktr/tree/2e7e85219dfe737cb4036de3cf0c8b00706d640e
SelfAttnPooler
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/hh/chhkclxtvzx7zmabgwvk3gwd5ocfilajncbyg4kvxdjw65zm6abk.py # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # setitem => full_default, index_put # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%squeeze, [%primals_4], %full_default), kwargs = {}) triton_poi_fused_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_0', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = float("-inf") tl.store(out_ptr0 + (x0 + (4*tmp4)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/nd/cndbfiage74onttz7jpowcwhqdq3zcfpbpyk423a5cv5a2dawlmr.py # Topologically Sorted Source Nodes: [attn_weights_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_weights_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze_1, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uu/cuut7kbpxfunuwux2in22rhkmy6qi2z6n53xsurrlwvmexxlrf3j.py # Topologically Sorted Source Nodes: [attn_weights_1], Original ATen: 
[aten._softmax] # Source node to ATen node mapping: # attn_weights_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] stream0 = get_raw_stream(0) triton_poi_fused_index_put_lift_fresh_0.run(primals_4, buf1, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_weights_1], Original ATen: 
[aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf3, 16, grid=grid(16), stream=stream0) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_weights_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (1, 0, 4), 0), reinterpret_tensor(primals_3, (4, 4, 4), (4, 16, 1), 0), out=buf5) del buf4 return (reinterpret_tensor(buf5, (4, 4), (4, 1), 0), primals_3, primals_4, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
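Inductor splits the dim-0 softmax into the two pointwise kernels above: `triton_poi_fused__softmax_1` computes the shifted exponentials (each of the 16 outputs re-reads the four rows of its column to find the max) and `triton_poi_fused__softmax_2` normalizes by the column sum. An eager sketch of the same two-step split:

import torch

x = torch.rand(4, 4)
e = (x - x.max(dim=0, keepdim=True).values).exp()  # kernel _softmax_1
y = e / e.sum(dim=0, keepdim=True)                 # kernel _softmax_2
print(torch.allclose(y, x.softmax(dim=0)))         # True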
import torch import torch.nn as nn class SelfAttnPooler(nn.Module): def __init__(self, input_dim): super().__init__() self.proj = nn.Linear(input_dim, 1) def forward(self, encoder_out, padding_mask): """ encoder_out: T, B, C padding_mask: T, B (True for padded positions) """ attn_weights = self.proj(encoder_out).squeeze(-1).float() if padding_mask is not None: attn_weights[padding_mask] = float('-inf') attn_weights = attn_weights.softmax(dim=0) out = torch.einsum('tb,tbc->bc', attn_weights.float(), encoder_out. float()) return out def get_inputs(): return [torch.rand([4, 4, 4]), torch.ones([4, 4, 4], dtype=torch.int64)] def get_init_inputs(): return [[], {'input_dim': 4}]
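A hedged usage sketch of the module above, with a (T, B) boolean mask as the docstring describes (shapes chosen for illustration). Note that get_inputs() instead supplies an int64 [4, 4, 4] tensor; with an integer tensor, attn_weights[padding_mask] performs integer (gather) indexing along dim 0 rather than boolean masking, which is why the compiled graph contains an index_put kernel with bounds checks.

import torch

T, B, C = 4, 4, 4
pooler = SelfAttnPooler(input_dim=C)
encoder_out = torch.rand(T, B, C)
padding_mask = torch.zeros(T, B, dtype=torch.bool)
padding_mask[3, :] = True        # pretend the last timestep is padding
out = pooler(encoder_out, padding_mask)
print(out.shape)                 # torch.Size([4, 4]) -> (B, C)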
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = float('-inf') tl.store(out_ptr0 + (x0 + 4 * tmp4), tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 get_raw_stream(0) triton_poi_fused_index_put_lift_fresh_0[grid(256)](primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf3, 
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (1, 0, 4), 0 ), reinterpret_tensor(primals_3, (4, 4, 4), (4, 16, 1), 0), out =buf5) del buf4 return reinterpret_tensor(buf5, (4, 4), (4, 1), 0 ), primals_3, primals_4, reinterpret_tensor(buf1, (4, 4), (4, 1), 0) class SelfAttnPoolerNew(nn.Module): def __init__(self, input_dim): super().__init__() self.proj = nn.Linear(input_dim, 1) def forward(self, input_0, input_1): primals_1 = self.proj.weight primals_2 = self.proj.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
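A sketch of how one might check the compiled wrapper against the eager module (assumes a CUDA device and that both classes above are in scope; both hold a single proj Linear, so their state dicts are interchangeable):

import torch

torch.manual_seed(0)
eager = SelfAttnPooler(input_dim=4).cuda()
compiled = SelfAttnPoolerNew(input_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the same proj weights

x = torch.rand(4, 4, 4, device='cuda')
mask = torch.ones(4, 4, 4, dtype=torch.int64, device='cuda')
torch.testing.assert_close(eager(x, mask), compiled(x, mask))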
ankitapasad/slue-toolkit
SelfAttnPooler
false
12,096
[ "MIT" ]
0
db8155cf0fc803e21890cf4eee2ef87152aafbfc
https://github.com/ankitapasad/slue-toolkit/tree/db8155cf0fc803e21890cf4eee2ef87152aafbfc
DPSLTMAdapter
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/l4/cl4boort6vfsvh6h6bfd4lck36kbmtipkqcrnhckuuxer6sfib77.py # Topologically Sorted Source Nodes: [h_0s], Original ATen: [aten.zeros] # Source node to ATen node mapping: # h_0s => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 1, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_zeros_0 = async_compile.triton('triton_poi_fused_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/sw/cswlmqxq57h53l6axfd6psb5uckvpd32zawfihpwp3s4owvqpcsb.py # Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward] # Source node to ATen node mapping: # c_t => add_1 # f_t => sigmoid_1 # g_t => tanh # h_t => mul_2 # i_t => sigmoid # mul => mul # mul_1 => mul_1 # o_t => sigmoid_2 # tanh_1 => tanh_1 # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_4,), kwargs = {}) # %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_5,), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_6,), kwargs = {}) # %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_7,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %squeeze), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {}) # %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %mul_2 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {}) # %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {}) # %mul_71 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub_18), kwargs = {}) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, 
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask) tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask) tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr4 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp16 + tmp17 tmp21 = tmp19 + tmp20 tmp22 = tmp18 + tmp21 tmp23 = libdevice.tanh(tmp22) tmp26 = tmp24 + tmp25 tmp29 = tmp27 + tmp28 tmp30 = tmp26 + tmp29 tmp31 = tl.sigmoid(tmp30) tmp33 = tmp31 * tmp32 tmp34 = tmp7 * tmp23 tmp35 = tmp33 + tmp34 tmp36 = 1.0 tmp37 = tmp36 - tmp31 tmp38 = tmp31 * tmp37 tmp39 = libdevice.tanh(tmp35) tmp40 = tmp15 * tmp39 tl.store(out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr1 + (x2), tmp15, xmask) tl.store(out_ptr2 + (x2), tmp23, xmask) tl.store(out_ptr3 + (x2), tmp35, xmask) tl.store(out_ptr4 + (x2), tmp38, xmask) tl.store(out_ptr5 + (x2), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/tl/ctlt2p573ni7yx4xldsz64sjwqkudoxkn6d6yvb2r54qxlmxds5p.py # Topologically Sorted Source Nodes: [i_t_1, f_t_1, g_t_1, o_t_1, mul_3, mul_4, c_t_1, tanh_3, h_t_1], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add] # Source node to ATen node mapping: # c_t_1 => add_3 # f_t_1 => sigmoid_4 # g_t_1 => tanh_2 # h_t_1 => mul_5 # i_t_1 => sigmoid_3 # mul_3 => mul_3 # mul_4 => mul_4 # o_t_1 => sigmoid_5 # tanh_3 => tanh_3 # Graph fragment: # %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_8,), kwargs = {}) # %sigmoid_4 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_9,), kwargs = {}) # %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_10,), kwargs = {}) # %sigmoid_5 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_11,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_4, %add_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_3, %tanh_2), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {}) # %tanh_3 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_3,), kwargs = {}) # %mul_5 : [num_users=3] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_5, %tanh_3), kwargs = {}) triton_poi_fused_add_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp17 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask) tmp20 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp25 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask) tmp28 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr4 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp16 + tmp17 tmp21 = tmp19 + tmp20 tmp22 = tmp18 + tmp21 tmp23 = tl.sigmoid(tmp22) tmp26 = tmp24 + tmp25 tmp29 = tmp27 + tmp28 tmp30 = tmp26 + tmp29 tmp31 = libdevice.tanh(tmp30) tmp33 = tmp15 * tmp32 tmp34 = tmp7 * tmp31 tmp35 = tmp33 + tmp34 tmp36 = 
libdevice.tanh(tmp35) tmp37 = tmp23 * tmp36 tl.store(out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr1 + (x2), tmp15, xmask) tl.store(out_ptr2 + (x2), tmp23, xmask) tl.store(out_ptr3 + (x2), tmp31, xmask) tl.store(out_ptr4 + (x2), tmp35, xmask) tl.store(out_ptr5 + (x2), tmp37, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ks/cksi7m4m7yxvsvd2ce7ftnos6exaibxyudnraey6nxajdfw246hj.py # Topologically Sorted Source Nodes: [i_t_3, f_t_3, g_t_3, mul_9, mul_10, c_t_3, tanh_7], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add] # Source node to ATen node mapping: # c_t_3 => add_7 # f_t_3 => sigmoid_10 # g_t_3 => tanh_6 # i_t_3 => sigmoid_9 # mul_10 => mul_10 # mul_9 => mul_9 # tanh_7 => tanh_7 # Graph fragment: # %sigmoid_9 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_16,), kwargs = {}) # %sigmoid_10 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_17,), kwargs = {}) # %tanh_6 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_18,), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_10, %add_5), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_9, %tanh_6), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mul_10), kwargs = {}) # %tanh_7 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_7,), kwargs = {}) triton_poi_fused_add_mul_sigmoid_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), 
xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask) tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr4 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp16 + tmp17 tmp21 = tmp19 + tmp20 tmp22 = tmp18 + tmp21 tmp23 = libdevice.tanh(tmp22) tmp25 = tmp15 * tmp24 tmp26 = tmp7 * tmp23 tmp27 = tmp25 + tmp26 tmp28 = libdevice.tanh(tmp27) tl.store(out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr1 + (x2), tmp15, xmask) tl.store(out_ptr2 + (x2), tmp23, xmask) tl.store(out_ptr3 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/o5/co5z4awunowwwrh5of536l26ozrjuwdhph5f2zpgdrfapcc3dduk.py # Topologically Sorted Source Nodes: [o_t_3], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # o_t_3 => sigmoid_11 # Graph fragment: # %sigmoid_11 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_19,), kwargs = {}) triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (12 + x0), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vm/cvmcreohmmmgtc2sngri4exy6k5t5hutelgjbv763z3d5cnripkm.py # Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack] # Source node to ATen node mapping: # h_n => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_2, %mul_5, %mul_8, %mul_11],), kwargs = {}) triton_poi_fused_stack_5 = async_compile.triton('triton_poi_fused_stack_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp20 = tl.load(in_ptr4 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp21 = tmp19 * tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp16, tmp21, tmp22) tmp24 = tl.where(tmp14, tmp15, tmp23) tmp25 = tl.where(tmp9, tmp10, tmp24) tmp26 = tl.where(tmp4, tmp5, tmp25) tl.store(out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 1, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_0s], Original ATen: [aten.zeros] stream0 = get_raw_stream(0) triton_poi_fused_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward] triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1.run(buf1, primals_3, buf2, primals_5, buf0, buf3, buf5, buf4, buf6, buf32, buf7, 16, grid=grid(16), stream=stream0) buf8 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf8) buf9 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf7, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [i_t_1, f_t_1, g_t_1, o_t_1, mul_3, mul_4, c_t_1, tanh_3, h_t_1], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add] triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf8, primals_3, buf9, primals_5, buf6, buf10, buf11, buf13, buf12, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf16) buf17 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf15, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4), (4, 
1), torch.float32) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [i_t_2, f_t_2, g_t_2, o_t_2, mul_6, mul_7, c_t_2, tanh_5, h_t_2], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add] triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf16, primals_3, buf17, primals_5, buf14, buf18, buf19, buf21, buf20, buf22, buf23, 16, grid=grid(16), stream=stream0) buf24 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf24) del primals_2 buf25 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf23, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf25) buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [i_t_3, f_t_3, g_t_3, mul_9, mul_10, c_t_3, tanh_7], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add] triton_poi_fused_add_mul_sigmoid_tanh_3.run(buf24, primals_3, buf25, primals_5, buf22, buf26, buf27, buf28, buf30, 16, grid=grid(16), stream=stream0) buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [o_t_3], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_4.run(buf24, primals_3, buf25, primals_5, buf29, 16, grid=grid(16), stream=stream0) del buf24 del primals_3 del primals_5 buf31 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0); del buf25 # reuse # Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack] triton_poi_fused_stack_5.run(buf7, buf15, buf23, buf29, buf30, buf31, 64, grid=grid(64), stream=stream0) return (reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), buf3, buf4, buf5, buf6, buf7, buf10, buf11, buf12, buf13, buf14, buf15, buf18, buf19, buf20, buf21, buf22, buf23, buf26, buf27, buf28, buf29, buf30, primals_4, buf32, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
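Each fused kernel above evaluates one LSTM time step: the extern mm calls produce x @ W_ih.T and h_prev @ W_hh.T as (B, 4H) buffers, and the kernel adds the biases, splits the result into the i/f/g/o gates, and applies the cell update. A minimal plain-PyTorch sketch of one step (the helper name is hypothetical; the gate math matches DPLSTMCell.forward below):

import torch

def lstm_step(x, h_prev, c_prev, w_ih, b_ih, w_hh, b_hh):
    gates = x @ w_ih.T + b_ih + h_prev @ w_hh.T + b_hh  # (B, 4H)
    i, f, g, o = gates.chunk(4, dim=1)                  # gate pre-activations
    c_t = torch.sigmoid(f) * c_prev + torch.sigmoid(i) * torch.tanh(g)
    h_t = torch.sigmoid(o) * torch.tanh(c_t)
    return h_t, c_t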
import math import torch from torch import Tensor import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel from typing import Tuple from typing import List from typing import Optional from typing import Dict from typing import Union from torch.nn.modules.module import _IncompatibleKeys def filter_out_old_keys(self, state_dict, prefix, local_metadata): new_state_dict = {param_name: param_value for param_name, param_value in state_dict.items() if param_name not in self.old_to_new} return new_state_dict class LSTMLinear(nn.Linear): """ This function is the same as a nn.Linear layer, except that in the backward pass the grad_samples get accumulated (instead of being concatenated as in the standard nn.Linear) """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'bool'=True): super().__init__(in_features, out_features, bias) class DPLSTMCell(nn.Module): """ Internal-only class. Implements *one* step of LSTM so that a LSTM layer can be seen as repeated applications of this class. """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias) self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias) self.reset_parameters() def reset_parameters(self): """ Resets parameters by initializing them from an uniform distribution. """ stdv = 1.0 / math.sqrt(self.hidden_size) for weight in self.parameters(): nn.init.uniform_(weight, -stdv, stdv) def forward(self, x: 'torch.Tensor', h_prev: 'torch.Tensor', c_prev: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]: gates = self.ih(x) + self.hh(h_prev) i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates, self.hidden_size, 1) i_t = torch.sigmoid(i_t_input) f_t = torch.sigmoid(f_t_input) g_t = torch.tanh(g_t_input) o_t = torch.sigmoid(o_t_input) c_t = f_t * c_prev + i_t * g_t h_t = o_t * torch.tanh(c_t) return h_t, c_t class DPLSTMLayer(nn.Module): """ Implements *one* layer of LSTM in a way amenable to differential privacy. We don't expect you to use this directly: use DPLSTM instead :) """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool', dropout: 'float', reverse: 'bool'=False): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.dropout = dropout self.reverse = reverse self.cell = DPLSTMCell(input_size=input_size, hidden_size= hidden_size, bias=bias) self.dropout_layer = nn.Dropout(dropout) if dropout > 0 else None def forward(self, x: 'torch.Tensor', state_init: 'Tuple[torch.Tensor, torch.Tensor]') ->Tuple[torch.Tensor, Tuple[ torch.Tensor, torch.Tensor]]: """ Implements the forward pass of the DPLSTMLayer when a sequence is given in input. Args: x: Input sequence to the DPLSTMCell of shape ``[T, B, D]``. state_init: Initial state of the LSTMCell as a tuple ``(h_0, c_0)`` where ``h_0`` is the initial hidden state and ``c_0`` is the initial cell state of the DPLSTMCell Returns: ``output, (h_n, c_n)`` where: - ``output`` is of shape ``[T, B, H]`` and is a tensor containing the output features (``h_t``) from the last layer of the DPLSTMCell for each timestep ``t``. - ``h_n`` is of shape ``[B, H]`` and is a tensor containing the hidden state for ``t = T``. - ``c_n`` is of shape ``[B, H]`` tensor containing the cell state for ``t = T``. 
""" seq_length, _batch_sz, _ = x.shape if self.reverse: x = x.flip(0) x = torch.unbind(x, dim=0) h_0, c_0 = state_init h_n = [h_0] c_n = [c_0] for t in range(seq_length): h_next, c_next = self.cell(x[t], h_n[t], c_n[t]) if self.dropout: h_next = self.dropout_layer(h_next) h_n.append(h_next) c_n.append(c_next) h_n = torch.stack(h_n[1:], dim=0) return h_n.flip(0) if self.reverse else h_n, (h_n[-1], c_n[-1]) class BidirectionalDPLSTMLayer(nn.Module): """ Implements *one* layer of Bidirectional LSTM in a way amenable to differential privacy. We don't expect you to use this directly: use DPLSTM instead :) """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool', dropout: 'float'): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.dropout = dropout self.forward_layer = DPLSTMLayer(input_size=input_size, hidden_size =hidden_size, bias=bias, dropout=dropout, reverse=False) self.reverse_layer = DPLSTMLayer(input_size=input_size, hidden_size =hidden_size, bias=bias, dropout=dropout, reverse=True) def forward(self, x: 'torch.Tensor', state_init: 'Tuple[torch.Tensor, torch.Tensor]') ->Tuple[torch.Tensor, Tuple[ torch.Tensor, torch.Tensor]]: """ Implements the forward pass of the DPLSTM when a sequence is input. Dimensions as follows: - B: Batch size - T: Sequence length - D: LSTM input hidden size (eg from a word embedding) - H: LSTM output hidden size - P: number of directions (2 if bidirectional, else 1) Args: x: Input sequence to the DPLSTM of shape ``[T, B, D]`` state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where: - h_0 of shape ``[P, B, H] contains the initial hidden state - c_0 of shape ``[P, B, H] contains the initial cell state This argument can be (and defaults to) None, in which case zero tensors will be used. Returns: ``output, (h_n, c_n)`` where: - ``output`` is of shape ``[T, B, H * P]`` and is a tensor containing the output features (``h_t``) from the last layer of the DPLSTM for each timestep ``t``. - ``h_n`` is of shape ``[P, B, H]`` and contains the hidden state for ``t = T``. - ``c_n`` is of shape ``[P, B, H]`` and contains the cell state for ``t = T``. """ h0, c0 = state_init h0_f, h0_r = h0.unbind(0) c0_f, c0_r = c0.unbind(0) out_f, (h_f, c_f) = self.forward_layer(x, (h0_f, c0_f)) out_r, (h_r, c_r) = self.reverse_layer(x, (h0_r, c0_r)) out = torch.cat([out_f, out_r], dim=-1) h = torch.stack([h_f, h_r], dim=0) c = torch.stack([c_f, c_r], dim=0) return out, (h, c) class ParamRenamedModule(nn.Module): """ This class defines a nn.Module whose parameters are renamed. This is useful when you want to reimplement a layer but make sure its state_dict and list of parameters are exactly the same as another reference layer so that you can have a drop-in replacement that does not depend on how your layer is actually implemented. In Opacus, this is used for DPLSTM, where our implementation leverages submodules and requires alignment to the state_dict of nn.LSTM. """ def __init__(self, rename_map: 'Dict[str, str]'): """ Initializes internal state. Subclass this instead of ``torch.nn.Module`` whenever you need to rename your model's state. Args: rename_map: mapping from old name -> new name for each parameter you want renamed. Note that this must be a 1:1 mapping! """ super().__init__() self.old_to_new = rename_map self.new_to_old = {v: k for k, v in rename_map.items()} self._register_state_dict_hook(filter_out_old_keys) def _register_renamed_parameters(self): """ Internal function. 
This function simply registers parameters under their new name. They will automatically mask their duplicates coming from submodules. This trick works because self.parameters() proceeds recursively from the top, going into submodules after processing items at the current level, and will not return duplicates. """ for old_name, param in super().named_parameters(): if old_name in self.old_to_new: new_name = self.old_to_new[old_name] self.register_parameter(new_name, param) def __setattr__(self, name: 'str', value: 'Union[Tensor, nn.Module]' ) ->None: """ Whenever you set an attribute, eg `self.linear`, this is called to actually register it in any nn.Module. We rely on the masking trick explained in the docs for ``_register_renamed_parameters`` to make sure we replace things only once. If a new parameter in the rename list is detected, we rename and mask it so next time this is called we will no longer find it. """ super().__setattr__(name, value) try: self._register_renamed_parameters() except AttributeError: pass def load_state_dict(self, state_dict: 'Dict[str, Tensor]', strict: 'bool'=True): """ Identical to ``torch.nn.Module.load_state_dict()`` but handles the renamed keys. """ missing_keys, unexpected_keys = super().load_state_dict(state_dict, strict=False) missing_keys = [k for k in missing_keys if k not in self.old_to_new] if strict: error_msgs = [] if len(unexpected_keys) > 0: error_msgs.insert(0, 'Unexpected key(s) in state_dict: {}. '.format(', '. join('"{}"'.format(k) for k in unexpected_keys))) if len(missing_keys) > 0: error_msgs.insert(0, 'Missing key(s) in state_dict: {}. '. format(', '.join('"{}"'.format(k) for k in missing_keys))) if len(error_msgs) > 0: raise RuntimeError( 'Error(s) in loading state_dict for {}:\n\t{}'.format( self.__class__.__name__, '\n\t'.join(error_msgs))) return _IncompatibleKeys(missing_keys, unexpected_keys) class DPLSTM(ParamRenamedModule): """ DP-friendly drop-in replacement of the ``torch.nn.LSTM`` module. Its state_dict matches that of nn.LSTM exactly, so that after training it can be exported and loaded by an nn.LSTM for inference. Refer to nn.LSTM's documentation for all parameters and inputs. """ def __init__(self, input_size: 'int', hidden_size: 'int', num_layers: 'int'=1, bias: 'bool'=True, batch_first: 'bool'=False, dropout: 'float'=0, bidirectional: 'bool'=False): rename_dict = self._make_rename_dict(num_layers, bias, bidirectional) super().__init__(rename_dict) self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.bidirectional = bidirectional self.num_directions = 2 if self.bidirectional else 1 LayerClass = BidirectionalDPLSTMLayer if bidirectional else DPLSTMLayer self.layers = nn.ModuleList([LayerClass(input_size=self.input_size if i == 0 else self.hidden_size * self.num_directions, hidden_size =self.hidden_size, bias=self.bias, dropout=self.dropout if i < self.num_layers - 1 else 0) for i in range(num_layers)]) def forward(self, x: 'torch.Tensor', state_init: 'Optional[Tuple[torch.Tensor, torch.Tensor]]'=None) ->Tuple[torch. Tensor, Tuple[torch.Tensor, torch.Tensor]]: """ Implements the forward pass of the DPLSTM when a sequence is input. 
Dimensions as follows: - B: Batch size - T: Sequence length - D: LSTM input hidden size (eg from a word embedding) - H: LSTM output hidden size - L: number of layers in the LSTM - P: number of directions (2 if bidirectional, else 1) Args: x: Input sequence to the DPLSTM of shape ``[T, B, D]`` state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where: - h_0 of shape ``[L*P, B, H] contains the initial hidden state - c_0 of shape ``[L*P, B, H] contains the initial cell state This argument can be (and defaults to) None, in which case zero tensors will be used. Returns: ``output, (h_n, c_n)`` where: - ``output`` is of shape ``[T, B, H * P]`` and is a tensor containing the output features (``h_t``) from the last layer of the DPLSTM for each timestep ``t``. - ``h_n`` is of shape ``[L * P, B, H]`` and contains the hidden state for ``t = T``. - ``c_n`` is of shape ``[L * P, B, H]`` and contains the cell state for ``t = T``. """ x = self._rearrange_batch_dim(x) _T, B, _D = x.shape L = self.num_layers P = 2 if self.bidirectional else 1 H = self.hidden_size h_0s, c_0s = state_init or (None, None) if h_0s is None: h_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype, device=x[0].device) else: h_0s = h_0s.reshape([L, P, B, H]) if c_0s is None: c_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype, device=x[0].device) else: c_0s = c_0s.reshape([L, P, B, H]) hs: 'List[torch.Tensor]' = [] cs: 'List[torch.Tensor]' = [] for layer, h0, c0 in zip(self.layers, h_0s, c_0s): if not self.bidirectional: h0 = h0.squeeze(0) c0 = c0.squeeze(0) x, (h, c) = layer(x, (h0, c0)) if not self.bidirectional: h = h.unsqueeze(0) c = c.unsqueeze(0) hs.append(h) cs.append(c) hs = torch.cat(hs, dim=0) cs = torch.cat(cs, dim=0) out = self._rearrange_batch_dim(x) return out, (hs, cs) def _rearrange_batch_dim(self, x: 'torch.Tensor') ->torch.Tensor: if self.batch_first: x = x.transpose(0, 1) return x def __repr__(self): s = f'DPLSTM({self.input_size}, {self.hidden_size}, bias={self.bias}' if self.batch_first: s += f', batch_first={self.batch_first}' if self.num_layers > 1: s += f', num_layers={self.num_layers}' if self.dropout: s += f', dropout={self.dropout}' if self.bidirectional: s += f', bidirectional={self.bidirectional}' return s def _make_rename_dict(self, num_layers, bias, bidirectional): """ Programmatically constructs a dictionary old_name -> new_name to align with the param names used in ``torch.nn.LSTM``. """ d = {} components = ['weight'] + ['bias' if bias else []] matrices = ['ih', 'hh'] for i in range(num_layers): for c in components: for m in matrices: nn_name = f'{c}_{m}_l{i}' if bidirectional: d[f'layers.{i}.forward_layer.cell.{m}.{c}'] = nn_name d[f'layers.{i}.reverse_layer.cell.{m}.{c}' ] = nn_name + '_reverse' else: d[f'layers.{i}.cell.{m}.{c}'] = nn_name return d class DPSLTMAdapter(nn.Module): """ Adapter for DPLSTM. LSTM returns a tuple, but our testing tools need the model to return a single tensor in output. We do this adaption here. """ def __init__(self, *args, **kwargs): super().__init__() self.dplstm = DPLSTM(*args, **kwargs) def forward(self, x): out, _rest = self.dplstm(x) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
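A hypothetical usage sketch of the adapter (assumes the classes above are in scope; batch_first defaults to False, so the input is laid out (T, B, D)):

import torch

model = DPSLTMAdapter(input_size=4, hidden_size=4)
x = torch.rand(4, 4, 4)   # (T, B, D)
out = model(x)
print(out.shape)          # torch.Size([4, 4, 4]) -> (T, B, H)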
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import Tensor import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel from typing import Tuple from typing import List from typing import Optional from typing import Dict from typing import Union from torch.nn.modules.module import _IncompatibleKeys assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask) tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask) tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp16 + tmp17 tmp21 = tmp19 + tmp20 tmp22 = tmp18 + tmp21 tmp23 = libdevice.tanh(tmp22) tmp26 = tmp24 + tmp25 tmp29 = tmp27 + tmp28 tmp30 = tmp26 + tmp29 tmp31 = tl.sigmoid(tmp30) tmp33 = tmp31 * tmp32 tmp34 = tmp7 * tmp23 tmp35 = tmp33 + tmp34 tmp36 = 1.0 tmp37 = tmp36 - tmp31 tmp38 = tmp31 * tmp37 tmp39 = libdevice.tanh(tmp35) tmp40 = tmp15 * tmp39 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) tl.store(out_ptr2 + x2, tmp23, xmask) tl.store(out_ptr3 + x2, tmp35, xmask) tl.store(out_ptr4 + x2, tmp38, xmask) tl.store(out_ptr5 + x2, tmp40, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = 
xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask) tmp20 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp25 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask) tmp28 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp16 + tmp17 tmp21 = tmp19 + tmp20 tmp22 = tmp18 + tmp21 tmp23 = tl.sigmoid(tmp22) tmp26 = tmp24 + tmp25 tmp29 = tmp27 + tmp28 tmp30 = tmp26 + tmp29 tmp31 = libdevice.tanh(tmp30) tmp33 = tmp15 * tmp32 tmp34 = tmp7 * tmp31 tmp35 = tmp33 + tmp34 tmp36 = libdevice.tanh(tmp35) tmp37 = tmp23 * tmp36 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) tl.store(out_ptr2 + x2, tmp23, xmask) tl.store(out_ptr3 + x2, tmp31, xmask) tl.store(out_ptr4 + x2, tmp35, xmask) tl.store(out_ptr5 + x2, tmp37, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask) tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp16 + tmp17 tmp21 = tmp19 + tmp20 tmp22 = tmp18 + tmp21 tmp23 = libdevice.tanh(tmp22) tmp25 = tmp15 * tmp24 tmp26 = tmp7 * tmp23 tmp27 = tmp25 + tmp26 tmp28 = libdevice.tanh(tmp27) tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) tl.store(out_ptr2 + x2, tmp23, xmask) tl.store(out_ptr3 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tl.store(out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0) tmp20 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0) tmp21 = tmp19 * tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp16, tmp21, tmp22) tmp24 = tl.where(tmp14, tmp15, tmp23) tmp25 = tl.where(tmp9, tmp10, tmp24) tmp26 = tl.where(tmp4, tmp5, tmp25) tl.store(out_ptr0 + x2, tmp26, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 1, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(16)](buf1 , primals_3, buf2, primals_5, buf0, buf3, buf5, buf4, buf6, buf32, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = buf2 del buf2 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf8) buf9 = buf1 del buf1 extern_kernels.mm(buf7, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf13 = empty_strided_cuda((4, 
4), (4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf8, primals_3, buf9, primals_5, buf6, buf10, buf11, buf13, buf12, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = buf9 del buf9 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf16) buf17 = buf8 del buf8 extern_kernels.mm(buf15, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf16, primals_3, buf17, primals_5, buf14, buf18, buf19, buf21, buf20, buf22, buf23, 16, XBLOCK=16, num_warps=1, num_stages=1) buf24 = buf17 del buf17 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf24) del primals_2 buf25 = buf16 del buf16 extern_kernels.mm(buf23, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf25) buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_tanh_3[grid(16)](buf24, primals_3, buf25, primals_5, buf22, buf26, buf27, buf28, buf30, 16, XBLOCK =16, num_warps=1, num_stages=1) buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_sigmoid_4[grid(16)](buf24, primals_3, buf25, primals_5, buf29, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf24 del primals_3 del primals_5 buf31 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0) del buf25 triton_poi_fused_stack_5[grid(64)](buf7, buf15, buf23, buf29, buf30, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1) return (reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor( primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), buf3, buf4, buf5, buf6, buf7, buf10, buf11, buf12, buf13, buf14, buf15, buf18, buf19, buf20, buf21, buf22, buf23, buf26, buf27, buf28, buf29, buf30, primals_4, buf32) def filter_out_old_keys(self, state_dict, prefix, local_metadata): new_state_dict = {param_name: param_value for param_name, param_value in state_dict.items() if param_name not in self.old_to_new} return new_state_dict class LSTMLinear(nn.Linear): """ This function is the same as a nn.Linear layer, except that in the backward pass the grad_samples get accumulated (instead of being concatenated as in the standard nn.Linear) """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'bool'=True): super().__init__(in_features, out_features, bias) class DPLSTMCell(nn.Module): """ Internal-only class. Implements *one* step of LSTM so that a LSTM layer can be seen as repeated applications of this class. 
""" def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias) self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias) self.reset_parameters() def reset_parameters(self): """ Resets parameters by initializing them from an uniform distribution. """ stdv = 1.0 / math.sqrt(self.hidden_size) for weight in self.parameters(): nn.init.uniform_(weight, -stdv, stdv) def forward(self, x: 'torch.Tensor', h_prev: 'torch.Tensor', c_prev: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]: gates = self.ih(x) + self.hh(h_prev) i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates, self.hidden_size, 1) i_t = torch.sigmoid(i_t_input) f_t = torch.sigmoid(f_t_input) g_t = torch.tanh(g_t_input) o_t = torch.sigmoid(o_t_input) c_t = f_t * c_prev + i_t * g_t h_t = o_t * torch.tanh(c_t) return h_t, c_t class DPLSTMLayer(nn.Module): """ Implements *one* layer of LSTM in a way amenable to differential privacy. We don't expect you to use this directly: use DPLSTM instead :) """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool', dropout: 'float', reverse: 'bool'=False): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.dropout = dropout self.reverse = reverse self.cell = DPLSTMCell(input_size=input_size, hidden_size= hidden_size, bias=bias) self.dropout_layer = nn.Dropout(dropout) if dropout > 0 else None def forward(self, x: 'torch.Tensor', state_init: 'Tuple[torch.Tensor, torch.Tensor]') ->Tuple[torch.Tensor, Tuple[ torch.Tensor, torch.Tensor]]: """ Implements the forward pass of the DPLSTMLayer when a sequence is given in input. Args: x: Input sequence to the DPLSTMCell of shape ``[T, B, D]``. state_init: Initial state of the LSTMCell as a tuple ``(h_0, c_0)`` where ``h_0`` is the initial hidden state and ``c_0`` is the initial cell state of the DPLSTMCell Returns: ``output, (h_n, c_n)`` where: - ``output`` is of shape ``[T, B, H]`` and is a tensor containing the output features (``h_t``) from the last layer of the DPLSTMCell for each timestep ``t``. - ``h_n`` is of shape ``[B, H]`` and is a tensor containing the hidden state for ``t = T``. - ``c_n`` is of shape ``[B, H]`` tensor containing the cell state for ``t = T``. """ seq_length, _batch_sz, _ = x.shape if self.reverse: x = x.flip(0) x = torch.unbind(x, dim=0) h_0, c_0 = state_init h_n = [h_0] c_n = [c_0] for t in range(seq_length): h_next, c_next = self.cell(x[t], h_n[t], c_n[t]) if self.dropout: h_next = self.dropout_layer(h_next) h_n.append(h_next) c_n.append(c_next) h_n = torch.stack(h_n[1:], dim=0) return h_n.flip(0) if self.reverse else h_n, (h_n[-1], c_n[-1]) class BidirectionalDPLSTMLayer(nn.Module): """ Implements *one* layer of Bidirectional LSTM in a way amenable to differential privacy. 
We don't expect you to use this directly: use DPLSTM instead :) """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool', dropout: 'float'): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.dropout = dropout self.forward_layer = DPLSTMLayer(input_size=input_size, hidden_size =hidden_size, bias=bias, dropout=dropout, reverse=False) self.reverse_layer = DPLSTMLayer(input_size=input_size, hidden_size =hidden_size, bias=bias, dropout=dropout, reverse=True) def forward(self, x: 'torch.Tensor', state_init: 'Tuple[torch.Tensor, torch.Tensor]') ->Tuple[torch.Tensor, Tuple[ torch.Tensor, torch.Tensor]]: """ Implements the forward pass of the DPLSTM when a sequence is input. Dimensions as follows: - B: Batch size - T: Sequence length - D: LSTM input hidden size (e.g. from a word embedding) - H: LSTM output hidden size - P: number of directions (2 if bidirectional, else 1) Args: x: Input sequence to the DPLSTM of shape ``[T, B, D]`` state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where: - h_0 of shape ``[P, B, H]`` contains the initial hidden state - c_0 of shape ``[P, B, H]`` contains the initial cell state This argument can be (and defaults to) None, in which case zero tensors will be used. Returns: ``output, (h_n, c_n)`` where: - ``output`` is of shape ``[T, B, H * P]`` and is a tensor containing the output features (``h_t``) from the last layer of the DPLSTM for each timestep ``t``. - ``h_n`` is of shape ``[P, B, H]`` and contains the hidden state for ``t = T``. - ``c_n`` is of shape ``[P, B, H]`` and contains the cell state for ``t = T``. """ h0, c0 = state_init h0_f, h0_r = h0.unbind(0) c0_f, c0_r = c0.unbind(0) out_f, (h_f, c_f) = self.forward_layer(x, (h0_f, c0_f)) out_r, (h_r, c_r) = self.reverse_layer(x, (h0_r, c0_r)) out = torch.cat([out_f, out_r], dim=-1) h = torch.stack([h_f, h_r], dim=0) c = torch.stack([c_f, c_r], dim=0) return out, (h, c) class ParamRenamedModule(nn.Module): """ This class defines an nn.Module whose parameters are renamed. This is useful when you want to reimplement a layer but make sure its state_dict and list of parameters are exactly the same as another reference layer so that you can have a drop-in replacement that does not depend on how your layer is actually implemented. In Opacus, this is used for DPLSTM, where our implementation leverages submodules and requires alignment to the state_dict of nn.LSTM. """ def __init__(self, rename_map: 'Dict[str, str]'): """ Initializes internal state. Subclass this instead of ``torch.nn.Module`` whenever you need to rename your model's state. Args: rename_map: mapping from old name -> new name for each parameter you want renamed. Note that this must be a 1:1 mapping! """ super().__init__() self.old_to_new = rename_map self.new_to_old = {v: k for k, v in rename_map.items()} self._register_state_dict_hook(filter_out_old_keys) def _register_renamed_parameters(self): """ Internal function. This function simply registers parameters under their new name. They will automatically mask their duplicates coming from submodules. This trick works because self.parameters() proceeds recursively from the top, going into submodules after processing items at the current level, and will not return duplicates. 
""" for old_name, param in super().named_parameters(): if old_name in self.old_to_new: new_name = self.old_to_new[old_name] self.register_parameter(new_name, param) def __setattr__(self, name: 'str', value: 'Union[Tensor, nn.Module]' ) ->None: """ Whenever you set an attribute, eg `self.linear`, this is called to actually register it in any nn.Module. We rely on the masking trick explained in the docs for ``_register_renamed_parameters`` to make sure we replace things only once. If a new parameter in the rename list is detected, we rename and mask it so next time this is called we will no longer find it. """ super().__setattr__(name, value) try: self._register_renamed_parameters() except AttributeError: pass def load_state_dict(self, state_dict: 'Dict[str, Tensor]', strict: 'bool'=True): """ Identical to ``torch.nn.Module.load_state_dict()`` but handles the renamed keys. """ missing_keys, unexpected_keys = super().load_state_dict(state_dict, strict=False) missing_keys = [k for k in missing_keys if k not in self.old_to_new] if strict: error_msgs = [] if len(unexpected_keys) > 0: error_msgs.insert(0, 'Unexpected key(s) in state_dict: {}. '.format(', '. join('"{}"'.format(k) for k in unexpected_keys))) if len(missing_keys) > 0: error_msgs.insert(0, 'Missing key(s) in state_dict: {}. '. format(', '.join('"{}"'.format(k) for k in missing_keys))) if len(error_msgs) > 0: raise RuntimeError( 'Error(s) in loading state_dict for {}:\n\t{}'.format( self.__class__.__name__, '\n\t'.join(error_msgs))) return _IncompatibleKeys(missing_keys, unexpected_keys) class DPLSTM(ParamRenamedModule): """ DP-friendly drop-in replacement of the ``torch.nn.LSTM`` module. Its state_dict matches that of nn.LSTM exactly, so that after training it can be exported and loaded by an nn.LSTM for inference. Refer to nn.LSTM's documentation for all parameters and inputs. """ def __init__(self, input_size: 'int', hidden_size: 'int', num_layers: 'int'=1, bias: 'bool'=True, batch_first: 'bool'=False, dropout: 'float'=0, bidirectional: 'bool'=False): rename_dict = self._make_rename_dict(num_layers, bias, bidirectional) super().__init__(rename_dict) self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.bidirectional = bidirectional self.num_directions = 2 if self.bidirectional else 1 LayerClass = BidirectionalDPLSTMLayer if bidirectional else DPLSTMLayer self.layers = nn.ModuleList([LayerClass(input_size=self.input_size if i == 0 else self.hidden_size * self.num_directions, hidden_size =self.hidden_size, bias=self.bias, dropout=self.dropout if i < self.num_layers - 1 else 0) for i in range(num_layers)]) def forward(self, x: 'torch.Tensor', state_init: 'Optional[Tuple[torch.Tensor, torch.Tensor]]'=None) ->Tuple[torch. Tensor, Tuple[torch.Tensor, torch.Tensor]]: """ Implements the forward pass of the DPLSTM when a sequence is input. Dimensions as follows: - B: Batch size - T: Sequence length - D: LSTM input hidden size (eg from a word embedding) - H: LSTM output hidden size - L: number of layers in the LSTM - P: number of directions (2 if bidirectional, else 1) Args: x: Input sequence to the DPLSTM of shape ``[T, B, D]`` state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where: - h_0 of shape ``[L*P, B, H] contains the initial hidden state - c_0 of shape ``[L*P, B, H] contains the initial cell state This argument can be (and defaults to) None, in which case zero tensors will be used. 
Returns: ``output, (h_n, c_n)`` where: - ``output`` is of shape ``[T, B, H * P]`` and is a tensor containing the output features (``h_t``) from the last layer of the DPLSTM for each timestep ``t``. - ``h_n`` is of shape ``[L * P, B, H]`` and contains the hidden state for ``t = T``. - ``c_n`` is of shape ``[L * P, B, H]`` and contains the cell state for ``t = T``. """ x = self._rearrange_batch_dim(x) _T, B, _D = x.shape L = self.num_layers P = 2 if self.bidirectional else 1 H = self.hidden_size h_0s, c_0s = state_init or (None, None) if h_0s is None: h_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype, device=x[0].device) else: h_0s = h_0s.reshape([L, P, B, H]) if c_0s is None: c_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype, device=x[0].device) else: c_0s = c_0s.reshape([L, P, B, H]) hs: 'List[torch.Tensor]' = [] cs: 'List[torch.Tensor]' = [] for layer, h0, c0 in zip(self.layers, h_0s, c_0s): if not self.bidirectional: h0 = h0.squeeze(0) c0 = c0.squeeze(0) x, (h, c) = layer(x, (h0, c0)) if not self.bidirectional: h = h.unsqueeze(0) c = c.unsqueeze(0) hs.append(h) cs.append(c) hs = torch.cat(hs, dim=0) cs = torch.cat(cs, dim=0) out = self._rearrange_batch_dim(x) return out, (hs, cs) def _rearrange_batch_dim(self, x: 'torch.Tensor') ->torch.Tensor: if self.batch_first: x = x.transpose(0, 1) return x def __repr__(self): s = f'DPLSTM({self.input_size}, {self.hidden_size}, bias={self.bias}' if self.batch_first: s += f', batch_first={self.batch_first}' if self.num_layers > 1: s += f', num_layers={self.num_layers}' if self.dropout: s += f', dropout={self.dropout}' if self.bidirectional: s += f', bidirectional={self.bidirectional}' return s + ')' def _make_rename_dict(self, num_layers, bias, bidirectional): """ Programmatically constructs a dictionary old_name -> new_name to align with the param names used in ``torch.nn.LSTM``. """ d = {} components = ['weight', 'bias'] if bias else ['weight'] matrices = ['ih', 'hh'] for i in range(num_layers): for c in components: for m in matrices: nn_name = f'{c}_{m}_l{i}' if bidirectional: d[f'layers.{i}.forward_layer.cell.{m}.{c}'] = nn_name d[f'layers.{i}.reverse_layer.cell.{m}.{c}' ] = nn_name + '_reverse' else: d[f'layers.{i}.cell.{m}.{c}'] = nn_name return d class DPSLTMAdapterNew(nn.Module): """ Adapter for DPLSTM. LSTM returns a tuple, but our testing tools need the model to return a single tensor as output. We do this adaptation here. """ def __init__(self, *args, **kwargs): super().__init__() self.dplstm = DPLSTM(*args, **kwargs) def forward(self, input_0): primals_2 = self.dplstm.weight_ih_l0 primals_3 = self.dplstm.bias_ih_l0 primals_4 = self.dplstm.weight_hh_l0 primals_5 = self.dplstm.bias_hh_l0 primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
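For orientation, the fused Triton kernels in this entry compute exactly the per-timestep gate arithmetic of DPLSTMCell.forward, with gates laid out as [i, f, g, o] along the 4 * hidden_size axis. Below is a minimal eager-mode sketch of one such step; the helper name lstm_step and the hidden_size=4 default are illustrative, not part of the repo:

import torch

def lstm_step(x, h_prev, c_prev, w_ih, b_ih, w_hh, b_hh, hidden_size=4):
    # Gate pre-activations; split order [i, f, g, o] matches
    # torch.split(gates, hidden_size, 1) in DPLSTMCell.forward.
    gates = x @ w_ih.t() + b_ih + h_prev @ w_hh.t() + b_hh
    i, f, g, o = torch.split(gates, hidden_size, dim=1)
    c_t = torch.sigmoid(f) * c_prev + torch.sigmoid(i) * torch.tanh(g)
    h_t = torch.sigmoid(o) * torch.tanh(c_t)
    return h_t, c_t

# Shapes follow the assert_size_stride checks in call():
x = torch.randn(4, 4)            # one timestep of the [T=4, B=4, D=4] input
h = c = torch.zeros(4, 4)
w_ih, w_hh = torch.randn(16, 4), torch.randn(16, 4)
b_ih, b_hh = torch.randn(16), torch.randn(16)
h1, c1 = lstm_step(x, h, c, w_ih, b_ih, w_hh, b_hh)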
adriansarstedt/opacus
DPSLTMAdapter
false
12097
[ "Apache-2.0" ]
0
a6c89e3d6a3a4e3e4b82bc8c68d53265a9a7cba1
https://github.com/adriansarstedt/opacus/tree/a6c89e3d6a3a4e3e4b82bc8c68d53265a9a7cba1
FCNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3q/c3qwr2d2rrpjzvnddomnmdy6cwva4hjlvrn2y5epemk4ak3k2m6c.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gi/cgikzytmesedinj4z3rqn6b5jwviamhgswfmfwdcordkia2bbyno.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_2 => relu_1 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ta/ctadkhhtrzgkcqrpiklb4lubyzabtmsldemj7ajsxkcjz6gi2u5s.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/j2/cj2fvpown4cia7d7vkfcgcqgyjjenqjrj3dsf57yvha4phl4yqmw.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_3, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_3, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_per_fused__log_softmax_3 = async_compile.triton('triton_per_fused__log_softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_3', 'mutated_arg_names': [], 
'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (400, 784), (784, 1)) assert_size_stride(primals_3, (400, ), (1, )) assert_size_stride(primals_4, (200, 400), (400, 1)) assert_size_stride(primals_5, (200, ), (1, )) assert_size_stride(primals_6, (100, 200), (200, 1)) assert_size_stride(primals_7, (100, ), (1, )) assert_size_stride(primals_8, (10, 100), (100, 1)) assert_size_stride(primals_9, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 400), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 1600, grid=grid(1600), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (400, 200), (1, 400), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_5, 800, grid=grid(800), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (200, 100), (1, 200), 0), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf5, primals_7, 400, grid=grid(400), stream=stream0) del primals_7 buf6 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (100, 10), (1, 100), 0), alpha=1, 
beta=1, out=buf6) del primals_9 buf9 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_per_fused__log_softmax_3.run(buf6, buf9, 4, 10, grid=grid(4), stream=stream0) del buf6 return (buf9, primals_1, buf1, buf3, buf5, buf9, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((400, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((200, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((100, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((10, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class FCNet(nn.Module): """ fully-connected neural network """ def __init__(self): super(FCNet, self).__init__() self.fc1 = nn.Linear(784, 400) self.fc2 = nn.Linear(400, 200) self.fc3 = nn.Linear(200, 100) self.fc4 = nn.Linear(100, 10) def forward(self, x): x = x.view(-1, 784) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.relu(self.fc3(x)) x = self.fc4(x) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
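A quick smoke test of the eager FCNet above (a sketch; the input shape follows get_inputs, and each row of a log_softmax output must exponentiate and sum to one):

import torch

model = FCNet()
out = model(torch.rand(4, 784))
assert out.shape == (4, 10)
# log-probabilities: exp of each row sums to 1
assert torch.allclose(out.exp().sum(dim=1), torch.ones(4), atol=1e-5)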
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (400, 784), (784, 1)) assert_size_stride(primals_3, (400,), (1,)) assert_size_stride(primals_4, (200, 400), (400, 1)) assert_size_stride(primals_5, (200,), (1,)) assert_size_stride(primals_6, (100, 200), (200, 1)) assert_size_stride(primals_7, (100,), (1,)) assert_size_stride(primals_8, (10, 100), (100, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32) 
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 400), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1600)](buf1, primals_3, 1600, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 200), (200, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (400, 200), ( 1, 400), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(800)](buf3, primals_5, 800, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (200, 100), ( 1, 200), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(400)](buf5, primals_7, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (100, 10), (1, 100), 0), alpha=1, beta=1, out=buf6) del primals_9 buf9 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_3[grid(4)](buf6, buf9, 4, 10, XBLOCK= 1, num_warps=2, num_stages=1) del buf6 return (buf9, primals_1, buf1, buf3, buf5, buf9, primals_8, primals_6, primals_4) class FCNetNew(nn.Module): """ fully-connected neural network """ def __init__(self): super(FCNetNew, self).__init__() self.fc1 = nn.Linear(784, 400) self.fc2 = nn.Linear(400, 200) self.fc3 = nn.Linear(200, 100) self.fc4 = nn.Linear(100, 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
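Because FCNetNew uses the same submodule names as FCNet, a state_dict round-trip gives a direct equivalence check between the compiled and eager paths. A sketch, assuming both classes from this entry are in scope (CUDA required, since call() asserts CUDA tensors):

import torch

if torch.cuda.is_available():
    ref = FCNet().cuda()
    fused = FCNetNew().cuda()
    fused.load_state_dict(ref.state_dict())   # identical fc1..fc4 keys
    x = torch.rand(4, 784, device='cuda')
    torch.testing.assert_close(fused(x), ref(x), rtol=1e-4, atol=1e-4)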
animeshbchowdhury/robust-pnr-time
FCNet
false
12098
[ "BSD-3-Clause" ]
0
301c5d973b8c024a85fdab915986ecf257e7698b
https://github.com/animeshbchowdhury/robust-pnr-time/tree/301c5d973b8c024a85fdab915986ecf257e7698b
StendLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/tl/ctlxbcojtewyoi77dwoaddcc26jtdqu7y755ds3wstwuetn5eola.py # Topologically Sorted Source Nodes: [start_comp, end_comp, add], Original ATen: [aten.binary_cross_entropy_with_logits, aten.add] # Source node to ATen node mapping: # add => add # end_comp => abs_2, exp_1, full_default_1, log1p_1, mean_1, minimum_1, mul_1, neg_1, sub_3, sub_4, sub_5 # start_comp => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %select), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %select), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %select_1), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %select_1), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select_1,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_2,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {}) # %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_1, %log1p_1), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %sub_4), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_5,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {}) triton_per_fused_add_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r2), None) tmp3 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp16 = tl.load(in_ptr0 + (64 + r2), None) tmp18 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) 
tmp15 = tl.sum(tmp13, 1)[:, None] tmp17 = tmp1 - tmp16 tmp19 = tmp17 * tmp18 tmp20 = triton_helpers.minimum(tmp5, tmp18) tmp21 = tl_math.abs(tmp18) tmp22 = -tmp21 tmp23 = tl_math.exp(tmp22) tmp24 = libdevice.log1p(tmp23) tmp25 = tmp20 - tmp24 tmp26 = tmp19 - tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = 64.0 tmp31 = tmp15 / tmp30 tmp32 = tmp29 / tmp30 tmp33 = tmp31 + tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp33, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [start_comp, end_comp, add], Original ATen: [aten.binary_cross_entropy_with_logits, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_0.run(buf2, arg1_1, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from itertools import chain as chain import torch.utils.data import torch.nn as nn from torch.nn.modules.loss import _Loss class StendLoss(_Loss): def __init__(self, size_average=None, reduce=None, reduction='mean'): super(StendLoss, self).__init__() self.reduction = reduction def forward(self, output, target): start_pred = output[:, 0] end_pred = output[:, 1] start_target = target[0] end_target = target[1] start_loss = nn.BCEWithLogitsLoss(reduction=self.reduction) end_loss = nn.BCEWithLogitsLoss(reduction=self.reduction) start_comp = start_loss(start_pred, start_target) end_comp = end_loss(end_pred, end_target) return start_comp + end_comp def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
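Note that the loss slices predictions along dim 1 and targets along dim 0, so with the [4, 4, 4, 4] inputs from get_inputs each BCE term sees a [4, 4, 4] pair. A sketch checking StendLoss against the functional API (local names are illustrative):

import torch
import torch.nn.functional as F

output = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
# Same slicing as StendLoss.forward: output[:, 0]/[:, 1] vs target[0]/[1]
expected = (F.binary_cross_entropy_with_logits(output[:, 0], target[0]) +
            F.binary_cross_entropy_with_logits(output[:, 1], target[1]))
torch.testing.assert_close(StendLoss()(output, target), expected)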
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from itertools import chain as chain import torch.utils.data from torch.nn.modules.loss import _Loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp16 = tl.load(in_ptr0 + (64 + r2), None) tmp18 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp17 = tmp1 - tmp16 tmp19 = tmp17 * tmp18 tmp20 = triton_helpers.minimum(tmp5, tmp18) tmp21 = tl_math.abs(tmp18) tmp22 = -tmp21 tmp23 = tl_math.exp(tmp22) tmp24 = libdevice.log1p(tmp23) tmp25 = tmp20 - tmp24 tmp26 = tmp19 - tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = 64.0 tmp31 = tmp15 / tmp30 tmp32 = tmp29 / tmp30 tmp33 = tmp31 + tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp33, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_0[grid(1)](buf2, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class StendLossNew(_Loss): def __init__(self, size_average=None, reduce=None, reduction='mean'): super(StendLossNew, self).__init__() self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
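A parity sketch between the eager and fused losses (CUDA only; the two arguments play the roles of output and target exactly as in StendLoss.forward):

import torch

if torch.cuda.is_available():
    out = torch.rand(4, 4, 4, 4, device='cuda')
    tgt = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(StendLossNew()(out, tgt),
                               StendLoss()(out, tgt))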
anton-br/SlowFast
StendLoss
false
12099
[ "Apache-2.0" ]
0
6e8d68bc6f3191886a57f819db1c766c6ca32d21
https://github.com/anton-br/SlowFast/tree/6e8d68bc6f3191886a57f819db1c766c6ca32d21
ZeroLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/zi/cziatn4srpsymxab7n67k7jt34egxdol3kpyktgeck2cxwbklbyh.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 
4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class ZeroLayer(nn.Module): def __init__(self, stride): super(ZeroLayer, self).__init__() self.stride = stride def forward(self, x): """n, c, h, w = x.size() h //= self.stride w //= self.stride device = x.get_device() if x.is_cuda else torch.device('cpu') # noinspection PyUnresolvedReferences padding = torch.zeros(n, c, h, w, device=device, requires_grad=False) return padding""" return x * 0 @staticmethod def is_zero_layer(): return True def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'stride': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ZeroLayerNew(nn.Module): def __init__(self, stride): super(ZeroLayerNew, self).__init__() self.stride = stride @staticmethod def is_zero_layer(): return True def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
airglow/nni
ZeroLayer
false
12,100
[ "MIT" ]
0
751065b788f66a6b53446620293095b1fe1b1c65
https://github.com/airglow/nni/tree/751065b788f66a6b53446620293095b1fe1b1c65
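A minimal parity sketch, added here and not part of the original record; it assumes ZeroLayer and ZeroLayerNew from the listings above are importable in one scope and that a CUDA device is available. It checks that the compiled kernel reproduces the eager x * 0 semantics.
import torch

def check_zero_layer():
    # Shapes mirror get_inputs()/get_init_inputs() from the record above.
    x = torch.rand([4, 4, 4, 4], device='cuda')
    eager_out = ZeroLayer(stride=1)(x)
    compiled_out = ZeroLayerNew(stride=1)(x.clone())  # clone is defensive; call() only clears its arg list
    assert torch.equal(eager_out, compiled_out)
    assert compiled_out.abs().sum().item() == 0.0  # every element is zeroed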
SpaceToDepth
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/3u/c3ub52l73zdv4klgqzgxmtzrzxvztuyczv2jksnvrjr7erq7guxd.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = 
yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torchvision import datasets as datasets import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data.distributed class SpaceToDepth(nn.Module): def __init__(self, block_size=4): super().__init__() assert block_size == 4 self.bs = block_size def forward(self, x): N, C, H, W = x.size() x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() x = x.view(N, C * self.bs ** 2, H // self.bs, W // self.bs) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torchvision import datasets as datasets import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 1, 1), 0), class SpaceToDepthNew(nn.Module): def __init__(self, block_size=4): super().__init__() assert block_size == 4 self.bs = block_size def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
adam-dziedzic/ASL
SpaceToDepth
false
12,101
[ "MIT" ]
0
cc063f5e7eda1498544ad2c3b224985203b0774a
https://github.com/adam-dziedzic/ASL/tree/cc063f5e7eda1498544ad2c3b224985203b0774a
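A hedged sanity check, added alongside this record under the same assumptions as above (both classes in one scope, CUDA present). Note that the permutation (0, 3, 5, 1, 2, 4) orders the output channels as (bs, bs, C) with C innermost, which differs from the channel order produced by F.pixel_unshuffle, so the eager module itself is the right reference.
import torch

x = torch.rand([4, 4, 4, 4], device='cuda')
ref = SpaceToDepth(block_size=4)(x)       # eager view/permute/view: (4, 64, 1, 1)
out = SpaceToDepthNew(block_size=4)(x)    # one fused clone kernel + reinterpret_tensor
assert ref.shape == out.shape == (4, 64, 1, 1)
assert torch.allclose(ref, out)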
HSwish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/jj/cjjcpa4jfom3kmx4ufnxtda3bmq466cpemkegyhzep2ymmlsg35l.py # Topologically Sorted Source Nodes: [add, relu6, mul, out], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div] # Source node to ATen node mapping: # add => add # mul => mul # out => div # relu6 => clamp_max, clamp_min # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %clamp_max), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6.0), kwargs = {}) triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = 0.16666666666666666 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, relu6, mul, out], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.utils.data.distributed class HSwish(nn.Module): """ Applies the Hard-Swish function element-wise. `"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_ Examples: >>> m = HSwish() >>> x = torch.randn(2) >>> output = m(x) """ @staticmethod def forward(x: 'torch.Tensor') ->torch.Tensor: out = x * torch.nn.functional.relu6(x + 3, inplace=True) / 6.0 return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = 0.16666666666666666 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class HSwishNew(nn.Module): """ Applies the Hard-Swish function element-wise. `"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_ Examples: >>> m = HSwishNew() >>> x = torch.randn(2) >>> output = m(x) """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ardianumam/Vanilla-GAN
HSwish
false
12,102
[ "Apache-2.0" ]
0
3fce9b60dca4609aad1d4e5eb834a2cc72cf07b3
https://github.com/ardianumam/Vanilla-GAN/tree/3fce9b60dca4609aad1d4e5eb834a2cc72cf07b3
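One extra note, added here as a hedged check: the fused kernel computes x * clamp(x + 3, 0, 6) / 6, which is exactly PyTorch's built-in hard-swish, so torch.nn.functional.hardswish serves as an independent reference (assuming CUDA and the HSwishNew class above).
import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4, device='cuda')
out = HSwishNew()(x)
# x * relu6(x + 3) / 6 coincides with the built-in hard-swish;
# the kernel multiplies by 0.1666... instead of dividing, hence the tolerance.
assert torch.allclose(out, F.hardswish(x), atol=1e-6)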
SpatialAttentionGate
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/qv/cqvpm4tidhpw42vquodkna5kubx3c46djnnb2jim63auds7wtadt.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/go/cgofqcgduqrtcjakfd7uk3wkcrpwsqxispluihwsstry6ekodk2u.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # x_2 => convolution_1 # x_3 => sigmoid # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_5, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 
1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1024, grid=grid(1024), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_1.run(buf3, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class SpatialAttentionGate(nn.Module): def __init__(self, channel, reduction=16): super(SpatialAttentionGate, self).__init__() self.fc1 = nn.Conv2d(channel, reduction, kernel_size=1, padding=0) self.fc2 = nn.Conv2d(reduction, 1, kernel_size=1, padding=0) def forward(self, x): x = self.fc1(x) x = F.relu(x, inplace=True) x = self.fc2(x) x = torch.sigmoid(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1024)](buf1, primals_2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_sigmoid_1[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1, buf3 class SpatialAttentionGateNew(nn.Module): def __init__(self, channel, reduction=16): super(SpatialAttentionGateNew, self).__init__() self.fc1 = nn.Conv2d(channel, reduction, kernel_size=1, padding=0) self.fc2 = nn.Conv2d(reduction, 1, kernel_size=1, padding=0) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
airglow/nni
SpatialAttentionGate
false
12,103
[ "MIT" ]
0
751065b788f66a6b53446620293095b1fe1b1c65
https://github.com/airglow/nni/tree/751065b788f66a6b53446620293095b1fe1b1c65
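A small parity sketch, added alongside this record and assuming both classes above share a scope and CUDA is available. Copying the state dict makes the two 1x1 convolutions identical, after which the eager and compiled gates should agree elementwise.
import torch

eager = SpatialAttentionGate(channel=4).cuda()
compiled = SpatialAttentionGateNew(channel=4).cuda()
compiled.load_state_dict(eager.state_dict())  # same fc1/fc2 weights and biases
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-6)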
HSigmoid
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/gl/cgljna3wfarubemgd6d2p3bgazvfhdxtrcu7luu5yza3rrfkty2s.py # Topologically Sorted Source Nodes: [add, relu6, out], Original ATen: [aten.add, aten.hardtanh, aten.div] # Source node to ATen node mapping: # add => add # out => div # relu6 => clamp_max, clamp_min # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {}) triton_poi_fused_add_div_hardtanh_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, relu6, out], Original ATen: [aten.add, aten.hardtanh, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.utils.data.distributed class HSigmoid(nn.Module): """ Applies the Hard-Sigmoid function element-wise. `"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_ Examples: >>> m = HSigmoid() >>> x = torch.randn(2) >>> output = m(x) """ @staticmethod def forward(x: 'torch.Tensor') ->torch.Tensor: out = torch.nn.functional.relu6(x + 3, inplace=True) / 6.0 return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class HSigmoidNew(nn.Module): """ Applies the Hard-Sigmoid function element-wise. `"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_ Examples: >>> m = HSigmoidNew() >>> x = torch.randn(2) >>> output = m(x) """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ardianumam/Vanilla-GAN
HSigmoid
false
12,104
[ "Apache-2.0" ]
0
3fce9b60dca4609aad1d4e5eb834a2cc72cf07b3
https://github.com/ardianumam/Vanilla-GAN/tree/3fce9b60dca4609aad1d4e5eb834a2cc72cf07b3
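As with HSwish above, a hedged check added alongside the record: relu6(x + 3) / 6 is the same function as PyTorch's built-in hard-sigmoid, so torch.nn.functional.hardsigmoid works as a reference for the compiled module.
import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4, device='cuda')
out = HSigmoidNew()(x)
# clamp(x + 3, 0, 6) / 6 == clamp(x / 6 + 0.5, 0, 1), i.e. hard-sigmoid.
assert torch.allclose(out, F.hardsigmoid(x), atol=1e-6)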
_AddNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/2z/c2zkvxx2bap6rkrvu5u2ypibpze2q2ydbjc6qyfqciinjgpzs57e.py # Topologically Sorted Source Nodes: [sigmoid, mul, skip, add, output], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # mul => mul # output => var_mean # sigmoid => sigmoid # skip => mul_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (1)) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp18 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (2)) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp27 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (3)) tmp30 = tl.broadcast_to(tmp29, [XBLOCK]) tmp4 = tl.sigmoid(tmp3) tmp5 = tmp1 * tmp4 tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tmp0 + tmp7 tmp13 = tl.sigmoid(tmp12) tmp14 = tmp10 * tmp13 tmp15 = tmp14 * tmp6 tmp16 = tmp9 + tmp15 tmp17 = tmp8 + tmp16 tmp22 = tl.sigmoid(tmp21) tmp23 = tmp19 * tmp22 tmp24 = tmp23 * tmp6 tmp25 = tmp18 + tmp24 tmp26 = tmp17 + tmp25 tmp31 = tl.sigmoid(tmp30) tmp32 = tmp28 * tmp31 tmp33 = tmp32 * tmp6 tmp34 = tmp27 + tmp33 tmp35 = tmp26 + tmp34 tmp36 = 4.0 tmp37 = tmp35 / tmp36 tmp38 = tmp8 - tmp37 tmp39 = tmp38 * tmp38 tmp40 = tmp16 - tmp37 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp25 - tmp37 tmp44 = tmp43 * tmp43 tmp45 = tmp42 + tmp44 tmp46 = tmp34 - tmp37 tmp47 = tmp46 * tmp46 tmp48 = tmp45 + tmp47 tmp49 = tmp48 / tmp36 tl.store(out_ptr0 + (x0), tmp37, xmask) tl.store(out_ptr1 + (x0), tmp49, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/sj/csj2kjzhnbru45opn7vpbebkenbpwwfmi66xlyyx5mjua6j72k6i.py # Topologically Sorted Source Nodes: [sigmoid, mul, skip, add, output], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # mul => mul # output => add_1, add_2, mul_2, mul_3, rsqrt, sub # sigmoid => sigmoid # skip => mul_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul_2 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_4), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_5), kwargs = {}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp9 = tmp7 - tmp8 tmp11 = 1e-05 tmp12 = tmp10 + tmp11 tmp13 = libdevice.rsqrt(tmp12) tmp14 = tmp9 * tmp13 tmp16 = tmp14 * tmp15 tmp18 = tmp16 + tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = 
empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, skip, add, output], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_native_layer_norm_sigmoid_0.run(primals_3, primals_2, primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, skip, add, output], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_sigmoid_1.run(primals_3, primals_2, primals_1, buf0, buf1, primals_4, primals_5, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_5 return (buf2, primals_1, primals_2, primals_3, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr2 + 1) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp18 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + 2) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp27 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr2 + 3) tmp30 = tl.broadcast_to(tmp29, [XBLOCK]) tmp4 = tl.sigmoid(tmp3) tmp5 = tmp1 * tmp4 tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tmp0 + tmp7 tmp13 = tl.sigmoid(tmp12) tmp14 = tmp10 * tmp13 tmp15 = tmp14 * tmp6 tmp16 = tmp9 + tmp15 tmp17 = tmp8 + tmp16 tmp22 = tl.sigmoid(tmp21) tmp23 = tmp19 * tmp22 tmp24 = tmp23 * tmp6 tmp25 = tmp18 + tmp24 tmp26 = tmp17 + tmp25 tmp31 = tl.sigmoid(tmp30) tmp32 = tmp28 * tmp31 tmp33 = tmp32 * tmp6 tmp34 = tmp27 + tmp33 tmp35 = tmp26 + tmp34 tmp36 = 4.0 tmp37 = tmp35 / tmp36 tmp38 = tmp8 - tmp37 tmp39 = tmp38 * tmp38 tmp40 = tmp16 - tmp37 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp25 - tmp37 tmp44 = tmp43 * tmp43 tmp45 = tmp42 + tmp44 tmp46 = tmp34 - tmp37 tmp47 = tmp46 * tmp46 tmp48 = tmp45 + tmp47 tmp49 = tmp48 / tmp36 tl.store(out_ptr0 + x0, tmp37, xmask) tl.store(out_ptr1 + x0, tmp49, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp9 = tmp7 - tmp8 tmp11 = 1e-05 tmp12 = tmp10 + tmp11 tmp13 = libdevice.rsqrt(tmp12) tmp14 = tmp9 * tmp13 tmp16 = tmp14 * tmp15 tmp18 = tmp16 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) 
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_native_layer_norm_sigmoid_0[grid(64)]( primals_3, primals_2, primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(256)]( primals_3, primals_2, primals_1, buf0, buf1, primals_4, primals_5, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_5 return buf2, primals_1, primals_2, primals_3, primals_4 class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _AddNormNew(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, input_0, input_1): primals_1 = self.mask primals_4 = self.norm.weight primals_5 = self.norm.bias primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
amadejkocbek/darts
_AddNorm
false
12105
[ "Apache-2.0" ]
0
074be2a76eee11258da066878c564badf40834e9
https://github.com/amadejkocbek/darts/tree/074be2a76eee11258da066878c564badf40834e9
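A minimal usage sketch for the _AddNorm sample, assuming the _AddNormNew wrapper defined above is importable under that name and a CUDA device is available (call() launches the fused Triton kernels directly):

import torch

if torch.cuda.is_available():
    module = _AddNormNew(input_size=4).cuda()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    out = module(a, b)
    # The generated wrapper gates its first argument with sigmoid(mask) * 2,
    # adds the second argument, and applies LayerNorm, mirroring the two
    # fused kernels above.
    ref = module.norm(b + a * torch.sigmoid(module.mask) * 2.0)
    print(torch.allclose(out, ref, atol=1e-5))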
_ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/4q/c4qoh645afcunrhaa5xye6sbkw2mzzlvmntdpffld4732bbjzx7o.py # Topologically Sorted Source Nodes: [dimension, attn_2], Original ATen: [aten.sqrt, aten._softmax] # Source node to ATen node mapping: # attn_2 => exp # dimension => full_default # Graph fragment: # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%full_default, 0), kwargs = {}) # %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False}) # %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, %where_self), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %full_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_sqrt_0 = async_compile.triton('triton_poi_fused__softmax_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, 
DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp8 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 2.0 tmp2 = 0.0 tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6 * tmp1 tmp21 = tmp19 / tmp20 tmp22 = tl_math.exp(tmp21) tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 
'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dimension, attn_2], Original ATen: [aten.sqrt, aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_sqrt_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class _ScaledDotProductAttention(nn.Module):

    def __init__(self, dropout: 'float'=None, scale: 'bool'=True):
        super(_ScaledDotProductAttention, self).__init__()
        if dropout is not None:
            self.dropout = nn.Dropout(p=dropout)
        else:
            self.dropout = dropout
        self.softmax = nn.Softmax(dim=2)
        self.scale = scale

    def forward(self, q, k, v, mask=None):
        attn = torch.bmm(q, k.permute(0, 2, 1))
        if self.scale:
            dimension = torch.sqrt(torch.tensor(k.shape[-1]))
            attn = attn / dimension
        if mask is not None:
            attn = attn.masked_fill(mask, -1000000000.0)
        attn = self.softmax(attn)
        if self.dropout is not None:
            attn = self.dropout(attn)
        output = torch.bmm(attn, v)
        return output, attn


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
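A short sanity check of the attention math, assuming _ScaledDotProductAttention above is in scope: the module's output should match an explicitly computed softmax(Q K^T / sqrt(d_k)) V.

import torch

q, k, v = torch.rand(4, 4, 4), torch.rand(4, 4, 4), torch.rand(4, 4, 4)
attn_module = _ScaledDotProductAttention(scale=True)
output, attn = attn_module(q, k, v)

# Hand-rolled reference: scale scores by sqrt(d_k), softmax over keys, weight values.
d_k = torch.tensor(k.shape[-1], dtype=torch.float32)
scores = torch.bmm(q, k.transpose(1, 2)) / d_k.sqrt()
ref = torch.bmm(torch.softmax(scores, dim=2), v)
print(torch.allclose(output, ref, atol=1e-6))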
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 2.0 tmp2 = 0.0 tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6 * tmp1 tmp21 = tmp19 / tmp20 tmp22 = tl_math.exp(tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_sqrt_0[grid(64)](buf0, buf1, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class _ScaledDotProductAttentionNew(nn.Module): def __init__(self, dropout: 'float'=None, scale: 'bool'=True): super(_ScaledDotProductAttentionNew, self).__init__() if dropout is not None: self.dropout = nn.Dropout(p=dropout) else: self.dropout = dropout self.softmax = nn.Softmax(dim=2) self.scale = scale def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = 
call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
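triton_poi_fused__softmax_sqrt_0 above is the first half of a numerically stable softmax: it subtracts the per-row maximum before exponentiating and folds the sqrt(d_k) = 2.0 scale into the divisor (the tl.where sign trick only guards against a negative scale), and triton_poi_fused__softmax_1 normalizes by the row sum. A plain-PyTorch sketch of the same two-pass scheme:

import torch

def two_pass_softmax(scores, scale=2.0):
    # Pass 1 (kernel 0): exp((x - rowmax(x)) / scale), stable against overflow.
    shifted = (scores - scores.amax(dim=2, keepdim=True)) / scale
    numer = shifted.exp()
    # Pass 2 (kernel 1): divide by the row sum to normalize.
    return numer / numer.sum(dim=2, keepdim=True)

scores = torch.rand(4, 4, 4)
print(torch.allclose(two_pass_softmax(scores), torch.softmax(scores / 2.0, dim=2)))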
amadejkocbek/darts
_ScaledDotProductAttention
false
12106
[ "Apache-2.0" ]
0
074be2a76eee11258da066878c564badf40834e9
https://github.com/amadejkocbek/darts/tree/074be2a76eee11258da066878c564badf40834e9
_ResampleNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/kd/ckdxxgoclliz5lzakuv2sbeywai7mrb5oztwl6f5ipzd4hqe42pt.py # Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm] # Source node to ATen node mapping: # mul => mul # output => var_mean # sigmoid => sigmoid # x => mul_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%mul_1, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_mul_native_layer_norm_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_native_layer_norm_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_native_layer_norm_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp21 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp10 = tl.sigmoid(tmp9) tmp11 = tmp7 * tmp10 tmp12 = tmp11 * tmp5 tmp13 = tmp6 + tmp12 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp14 * tmp17 tmp19 = tmp18 * tmp5 tmp20 = tmp13 + tmp19 tmp24 = tl.sigmoid(tmp23) tmp25 = tmp21 * tmp24 tmp26 = tmp25 * tmp5 tmp27 = tmp20 + tmp26 tmp28 = 4.0 tmp29 = tmp27 / tmp28 tmp30 = tmp6 - tmp29 tmp31 = tmp30 * tmp30 tmp32 = tmp12 - tmp29 tmp33 = tmp32 * tmp32 tmp34 = tmp31 + tmp33 tmp35 = tmp19 - tmp29 tmp36 = tmp35 * tmp35 tmp37 = tmp34 + tmp36 tmp38 = tmp26 - tmp29 tmp39 = tmp38 * tmp38 tmp40 = tmp37 + tmp39 tmp41 = tmp40 / tmp28 tl.store(out_ptr0 + (x0), tmp29, xmask) tl.store(out_ptr1 + (x0), tmp41, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xc/cxcpfundfbswf2vcixxfx2augodmjtu2z5yzl767pujn6nzsbm4e.py # Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm] # Source node to ATen node mapping: # mul => mul # output => add, add_1, mul_2, mul_3, rsqrt, sub # sigmoid => sigmoid # x => mul_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %getitem_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_4), kwargs = {}) triton_poi_fused_mul_native_layer_norm_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_native_layer_norm_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_native_layer_norm_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_mul_native_layer_norm_sigmoid_0.run(primals_2, primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm] triton_poi_fused_mul_native_layer_norm_sigmoid_1.run(primals_2, primals_1, buf0, buf1, primals_3, primals_4, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_4 return (buf2, primals_1, primals_2, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 
= rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class _TimeDistributedInterpolation(nn.Module):

    def __init__(self, output_size: 'int', batch_first: 'bool'=False,
        trainable: 'bool'=False):
        super().__init__()
        self.output_size = output_size
        self.batch_first = batch_first
        self.trainable = trainable
        if self.trainable:
            self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=torch.float32))
            self.gate = nn.Sigmoid()

    def interpolate(self, x):
        upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode='linear', align_corners=True).squeeze(1)
        if self.trainable:
            upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0
        return upsampled

    def forward(self, x):
        if len(x.size()) <= 2:
            return self.interpolate(x)
        x_reshape = x.contiguous().view(-1, x.size(-1))
        y = self.interpolate(x_reshape)
        if self.batch_first:
            y = y.contiguous().view(x.size(0), -1, y.size(-1))
        else:
            y = y.view(-1, x.size(1), y.size(-1))
        return y


class _ResampleNorm(nn.Module):

    def __init__(self, input_size: 'int', output_size: 'int'=None,
        trainable_add: 'bool'=True):
        super().__init__()
        self.input_size = input_size
        self.trainable_add = trainable_add
        self.output_size = output_size or input_size
        if self.input_size != self.output_size:
            self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False)
        if self.trainable_add:
            self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=torch.float))
            self.gate = nn.Sigmoid()
        self.norm = nn.LayerNorm(self.output_size)

    def forward(self, x: 'torch.Tensor') ->torch.Tensor:
        if self.input_size != self.output_size:
            x = self.resample(x)
        if self.trainable_add:
            x = x * self.gate(self.mask) * 2.0
        output = self.norm(x)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
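A hypothetical usage sketch for _ResampleNorm, assuming the classes above are in scope: when output_size differs from input_size, the last dimension is linearly interpolated first, and _TimeDistributedInterpolation flattens the middle dimensions of higher-rank inputs.

import torch

norm = _ResampleNorm(input_size=4)                 # identity resample branch
x = torch.rand(4, 4, 4, 4)
print(norm(x).shape)                               # torch.Size([4, 4, 4, 4])

resample = _ResampleNorm(input_size=4, output_size=8)
print(resample(x).shape)                           # torch.Size([4, 16, 8]); middle dims flattened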
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp21 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp10 = tl.sigmoid(tmp9) tmp11 = tmp7 * tmp10 tmp12 = tmp11 * tmp5 tmp13 = tmp6 + tmp12 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp14 * tmp17 tmp19 = tmp18 * tmp5 tmp20 = tmp13 + tmp19 tmp24 = tl.sigmoid(tmp23) tmp25 = tmp21 * tmp24 tmp26 = tmp25 * tmp5 tmp27 = tmp20 + tmp26 tmp28 = 4.0 tmp29 = tmp27 / tmp28 tmp30 = tmp6 - tmp29 tmp31 = tmp30 * tmp30 tmp32 = tmp12 - tmp29 tmp33 = tmp32 * tmp32 tmp34 = tmp31 + tmp33 tmp35 = tmp19 - tmp29 tmp36 = tmp35 * tmp35 tmp37 = tmp34 + tmp36 tmp38 = tmp26 - tmp29 tmp39 = tmp38 * tmp38 tmp40 = tmp37 + tmp39 tmp41 = tmp40 / tmp28 tl.store(out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr1 + x0, tmp41, xmask) @triton.jit def triton_poi_fused_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_mul_native_layer_norm_sigmoid_0[grid(64)](primals_2, primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_native_layer_norm_sigmoid_1[grid(256)](primals_2, primals_1, buf0, buf1, primals_3, primals_4, buf2, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_4 return buf2, primals_1, primals_2, primals_3 class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _ResampleNormNew(nn.Module): def __init__(self, input_size: 'int', output_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.output_size = output_size or input_size if self.input_size != self.output_size: self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.output_size) def forward(self, input_0): primals_1 = self.mask primals_3 = self.norm.weight primals_4 = self.norm.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
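A parity sketch, assuming both _ResampleNorm and _ResampleNormNew above are importable and a GPU is present: with identical parameters, the compiled wrapper should reproduce the eager module.

import torch

if torch.cuda.is_available():
    eager = _ResampleNorm(input_size=4).cuda()
    compiled = _ResampleNormNew(input_size=4).cuda()
    compiled.load_state_dict(eager.state_dict())   # mask, norm.weight, norm.bias
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(torch.allclose(eager(x), compiled(x), atol=1e-5))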
amadejkocbek/darts
_ResampleNorm
false
12107
[ "Apache-2.0" ]
0
074be2a76eee11258da066878c564badf40834e9
https://github.com/amadejkocbek/darts/tree/074be2a76eee11258da066878c564badf40834e9
_GateAddNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/pt/cpt3kou2mafw4tfe5tp2oixeb4z2kkf6adwejs6l7mux2qvryb27.py # Topologically Sorted Source Nodes: [x_1, add], Original ATen: [aten.glu, aten.add] # Source node to ATen node mapping: # add => add # x_1 => glu # Graph fragment: # %glu : [num_users=1] = call_function[target=torch.ops.aten.glu.default](args = (%view_1,), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%glu, %primals_4), kwargs = {}) triton_poi_fused_add_glu_0 = async_compile.triton('triton_poi_fused_add_glu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_glu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_glu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) 
tmp1 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp4 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6n/c6nwltytpo33ssumvxlcryrpvlql2hsjrmxl624j4dkkjxt5qgkm.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # output => add_1, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/mn/cmntyljhuirhsdjg2yosgzllpkpxqedxgoyk6gunquq2rf3kl7u5.py # Topologically Sorted 
Source Nodes: [output], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # output => add_1, add_2, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {}) triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8, ), (1, )) 
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, add], Original ATen: [aten.glu, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_glu_0.run(buf0, primals_4, buf1, 256, grid=grid(256), stream=stream0) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_5, primals_6, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_6 return (buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class _TimeDistributedInterpolation(nn.Module):

    def __init__(self, output_size: 'int', batch_first: 'bool'=False,
        trainable: 'bool'=False):
        super().__init__()
        self.output_size = output_size
        self.batch_first = batch_first
        self.trainable = trainable
        if self.trainable:
            self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=torch.float32))
            self.gate = nn.Sigmoid()

    def interpolate(self, x):
        upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode='linear', align_corners=True).squeeze(1)
        if self.trainable:
            upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0
        return upsampled

    def forward(self, x):
        if len(x.size()) <= 2:
            return self.interpolate(x)
        x_reshape = x.contiguous().view(-1, x.size(-1))
        y = self.interpolate(x_reshape)
        if self.batch_first:
            y = y.contiguous().view(x.size(0), -1, y.size(-1))
        else:
            y = y.view(-1, x.size(1), y.size(-1))
        return y


class _AddNorm(nn.Module):

    def __init__(self, input_size: 'int', skip_size: 'int'=None,
        trainable_add: 'bool'=True):
        super().__init__()
        self.input_size = input_size
        self.trainable_add = trainable_add
        self.skip_size = skip_size or input_size
        if self.input_size != self.skip_size:
            self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False)
        if self.trainable_add:
            self.mask = nn.Parameter(torch.zeros(self.input_size, dtype=torch.float))
            self.gate = nn.Sigmoid()
        self.norm = nn.LayerNorm(self.input_size)

    def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'):
        if self.input_size != self.skip_size:
            skip = self.resample(skip)
        if self.trainable_add:
            skip = skip * self.gate(self.mask) * 2.0
        output = self.norm(x + skip)
        return output


class _GatedLinearUnit(nn.Module):
    """Gated Linear Unit"""

    def __init__(self, input_size: 'int', hidden_size: 'int'=None,
        dropout: 'float'=None):
        super().__init__()
        if dropout is not None:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = dropout
        self.hidden_size = hidden_size or input_size
        self.fc = nn.Linear(input_size, self.hidden_size * 2)
        self.init_weights()

    def init_weights(self):
        for n, p in self.named_parameters():
            if 'bias' in n:
                torch.nn.init.zeros_(p)
            elif 'fc' in n:
                torch.nn.init.xavier_uniform_(p)

    def forward(self, x):
        if self.dropout is not None:
            x = self.dropout(x)
        x = self.fc(x)
        x = F.glu(x, dim=-1)
        return x


class _GateAddNorm(nn.Module):

    def __init__(self, input_size: 'int', hidden_size: 'int'=None,
        skip_size: 'int'=None, trainable_add: 'bool'=False,
        dropout: 'float'=None):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size or input_size
        self.skip_size = skip_size or self.hidden_size
        self.dropout = dropout
        self.glu = _GatedLinearUnit(self.input_size, hidden_size=self.hidden_size, dropout=self.dropout)
        self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add)

    def forward(self, x, skip):
        output = self.glu(x)
        output = self.add_norm(output, skip)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
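The gating in _GatedLinearUnit is the standard GLU split: fc doubles the feature dimension and F.glu multiplies the first half by the sigmoid of the second, so _GateAddNorm computes LayerNorm(GLU(x) + skip) when trainable_add is False (the default here). A minimal sketch spelling that out, assuming the classes above are in scope:

import torch

x = torch.rand(4, 4, 4, 4)
skip = torch.rand(4, 4, 4, 4)
block = _GateAddNorm(input_size=4)
out = block(x, skip)

# Same computation by hand: chunked GLU, residual add, LayerNorm.
a, b = block.glu.fc(x).chunk(2, dim=-1)
ref = block.add_norm.norm(a * torch.sigmoid(b) + skip)
print(torch.allclose(out, ref, atol=1e-6))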
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_glu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp4 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x2, tmp5, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) 
get_raw_stream(0) triton_poi_fused_add_glu_0[grid(256)](buf0, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0), buf1 class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x class _GateAddNormNew(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int'=None, skip_size: 'int'=None, trainable_add: 'bool'=False, dropout: 'float'=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size or input_size self.skip_size = skip_size or self.hidden_size self.dropout = dropout self.glu = _GatedLinearUnit(self.input_size, hidden_size=self. 
hidden_size, dropout=self.dropout) self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add) def forward(self, input_0, input_1): primals_1 = self.glu.fc.weight primals_2 = self.glu.fc.bias primals_5 = self.add_norm.norm.weight primals_6 = self.add_norm.norm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
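# --- Editor's note: hedged usage sketch, not part of the generated code. ---
# The fused wrapper above runs GLU(fc(x)) + skip followed by LayerNorm in
# three Triton kernels. The 4x4x4x4 float32 shapes and the CUDA requirement
# come from the assert_size_stride guards in call(); everything else below
# (variable names, tolerance) is illustrative.
if __name__ == "__main__":
    m = _GateAddNormNew(input_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    skip = torch.rand(4, 4, 4, 4, device="cuda")
    out = m(x, skip)
    # Reference path through the eager submodules defined above.
    ref = m.add_norm(m.glu(x), skip)
    print(out.shape, torch.allclose(out, ref, atol=1e-5))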
amadejkocbek/darts
_GateAddNorm
false
12108
[ "Apache-2.0" ]
0
074be2a76eee11258da066878c564badf40834e9
https://github.com/amadejkocbek/darts/tree/074be2a76eee11258da066878c564badf40834e9
_GatedLinearUnit
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/fg/cfgseyxhpu7b6i4xtsiblktsjg6wbg2mlcsyld7lmr4dhbh7u4xc.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.glu] # Source node to ATen node mapping: # x_1 => glu # Graph fragment: # %glu : [num_users=1] = call_function[target=torch.ops.aten.glu.default](args = (%view_1,), kwargs = {}) triton_poi_fused_glu_0 = async_compile.triton('triton_poi_fused_glu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_glu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_glu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.glu] stream0 = get_raw_stream(0) triton_poi_fused_glu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
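# --- Editor's note: hedged sketch, illustrative only. ---
# fc doubles the feature dim to hidden_size * 2 and F.glu(..., dim=-1) gates
# the first half with the sigmoid of the second, so the output keeps
# hidden_size features. Quick check of the module above:
if __name__ == "__main__":
    glu = _GatedLinearUnit(input_size=4)
    x = get_inputs()[0]
    a, b = glu.fc(x).chunk(2, dim=-1)
    print(glu(x).shape, torch.allclose(glu(x), a * torch.sigmoid(b)))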
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_glu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_glu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0) class _GatedLinearUnitNew(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
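# --- Editor's note: hedged usage sketch, not part of the generated code. ---
# The compiled wrapper fuses the GLU gating into triton_poi_fused_glu_0 while
# the addmm stays an extern kernel. CUDA-only, since call() pins device 0;
# the tolerance is illustrative.
if __name__ == "__main__":
    import torch.nn.functional as F
    m = _GatedLinearUnitNew(input_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    ref = F.glu(m.fc(x), dim=-1)  # eager reference with the same parameters
    print(torch.allclose(m(x), ref, atol=1e-6))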
amadejkocbek/darts
_GatedLinearUnit
false
12109
[ "Apache-2.0" ]
0
074be2a76eee11258da066878c564badf40834e9
https://github.com/amadejkocbek/darts/tree/074be2a76eee11258da066878c564badf40834e9
TReLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/py/cpyzadjwmhrswle2fe2xnqc4ovcx6ej5wf6x5qrio2i2w7v2ppfk.py # Topologically Sorted Source Nodes: [sub, relu, x], Original ATen: [aten.sub, aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # sub => sub # x => add # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_1), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_sub_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_relu_sub_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_sub_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_sub_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp5 + tmp2 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr1 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [sub, relu, x], Original ATen: [aten.sub, aten.relu, aten.add, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_relu_sub_threshold_backward_0.run(primals_2, primals_1, buf0, buf1, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class TReLU(nn.Module): def __init__(self): super(TReLU, self).__init__() self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True) self.alpha.data.fill_(0) def forward(self, x): x = F.relu(x - self.alpha) + self.alpha return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
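# --- Editor's note: hedged sketch, illustrative values only. ---
# F.relu(x - alpha) + alpha is algebraically max(x, alpha), i.e. a ReLU whose
# threshold alpha is learned. Quick numerical check:
if __name__ == "__main__":
    act = TReLU()
    act.alpha.data.fill_(0.3)
    x = torch.randn(4, 4, 4, 4)
    print(torch.allclose(act(x), torch.clamp(x, min=0.3)))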
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_relu_sub_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp5 + tmp2 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_sub_threshold_backward_0[grid(256)](primals_2 , primals_1, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1 ) del primals_1 del primals_2 return buf0, buf1 class TReLUNew(nn.Module): def __init__(self): super(TReLUNew, self).__init__() self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True) self.alpha.data.fill_(0) def forward(self, input_0): primals_1 = self.alpha primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
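# --- Editor's note: hedged usage sketch, not part of the generated code. ---
# call() returns (buf0, buf1): buf0 is the activation itself, buf1 the
# boolean relu <= 0 mask saved for threshold_backward; the wrapper exposes
# only buf0. CUDA-only, since call() pins device 0; values are illustrative.
if __name__ == "__main__":
    act = TReLUNew().cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    y = act(x)
    print(y.shape, torch.allclose(y, torch.clamp(x, min=act.alpha.item())))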
archiroid003/ICCV2019-LearningToPaint
TReLU
false
12110
[ "MIT" ]
0
4b5fc263e4843c159a61e5956956b3f7812693f8
https://github.com/archiroid003/ICCV2019-LearningToPaint/tree/4b5fc263e4843c159a61e5956956b3f7812693f8
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/v7/cv7humnywkkqhrumbeetegqlkretdwtkj5pcanrbgxrolupvobzt.py # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, h], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # add_1 => add_1 # h => mul_3 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_1 => pow_1 # tanh => tanh # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {}) triton_poi_fused_add_mul_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_0', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, h], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_5, alpha=1, beta=1, out=buf2) del primals_4 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class SharedDropout(torch.nn.Module): def __init__(self, p): super(SharedDropout, self).__init__() self.p = p def forward(self, x): if self.training: mask = torch.rand_like(x[0:1]) > self.p return mask.type(x.type()) * x / (1 - self.p) else: return x class Conv1D(nn.Module): def __init__(self, nf, nx): """ Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2) Basically works like a Linear layer but the weights are transposed """ super().__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = nn.Parameter(w) self.bias = nn.Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) return x class MLP(nn.Module): def __init__(self, n_state, config): super().__init__() nx = config.n_embd self.c_fc = Conv1D(n_state, nx) self.c_proj = Conv1D(nx, n_state) self.act = gelu self.dropout = SharedDropout(config.resid_pdrop) def forward(self, x): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) return self.dropout(h2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_state': 4, 'config': _mock_config(n_embd=4, resid_pdrop=4)} ]
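# --- Editor's note: hedged sketch. ---
# The gelu above is the tanh approximation
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))); on recent PyTorch
# releases (an assumption, torch >= 1.12) it should match
# F.gelu(x, approximate='tanh') up to float32 rounding:
if __name__ == "__main__":
    import torch.nn.functional as F
    x = torch.randn(1024)
    print(torch.allclose(gelu(x), F.gelu(x, approximate='tanh'), atol=1e-6))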
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), primals_5, alpha=1, beta=1, out=buf2) del primals_4 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor( primals_1, (4, 64), (1, 4), 0) def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class SharedDropout(torch.nn.Module): def __init__(self, p): super(SharedDropout, self).__init__() self.p = p def forward(self, x): if self.training: mask = torch.rand_like(x[0:1]) > self.p return mask.type(x.type()) * x / (1 - self.p) else: return x class Conv1D(nn.Module): def __init__(self, nf, nx): """ Conv1D layer as defined by Radford et al. 
for OpenAI GPT (and also used in GPT-2) Basically works like a Linear layer but the weights are transposed """ super().__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = nn.Parameter(w) self.bias = nn.Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) return x class MLPNew(nn.Module): def __init__(self, n_state, config): super().__init__() nx = config.n_embd self.c_fc = Conv1D(n_state, nx) self.c_proj = Conv1D(nx, n_state) self.act = gelu self.dropout = SharedDropout(config.resid_pdrop) def forward(self, input_0): primals_3 = self.c_fc.weight primals_2 = self.c_fc.bias primals_5 = self.c_proj.weight primals_4 = self.c_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
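# --- Editor's note: hedged usage sketch, not part of the generated code. ---
# _mock_config comes from the benchmark harness; any object exposing n_embd
# and resid_pdrop works, e.g. a SimpleNamespace (illustrative). Note that
# call() contains no dropout op, so the compiled forward skips SharedDropout
# entirely (the graph was evidently traced for inference). CUDA-only.
if __name__ == "__main__":
    from types import SimpleNamespace
    cfg = SimpleNamespace(n_embd=4, resid_pdrop=0.1)
    mlp = MLPNew(n_state=4, config=cfg).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    print(mlp(x).shape)  # torch.Size([4, 4, 4, 4])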
albertkx/GeDi
MLP
false
12111
[ "BSD-3-Clause" ]
0
27532eb6ac5dd42d817d25a905401504e916f9fb
https://github.com/albertkx/GeDi/tree/27532eb6ac5dd42d817d25a905401504e916f9fb
_GatedResidualNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/ck/cck6zsxedo53nyj2po2pvkfjvrr75ansuu3rjjhu6zyrx6xzssqo.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_1 => expm1, gt, mul, mul_2, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit 
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/g2/cg2n33ecqurwkyiyucsylguej6exc6zpz6fyhk7hcbdsevf2l4sr.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.glu] # Source node to ATen node mapping: # x_5 => glu # Graph fragment: # %glu : [num_users=2] = call_function[target=torch.ops.aten.glu.default](args = (%view_5,), kwargs = {}) triton_poi_fused_glu_1 = async_compile.triton('triton_poi_fused_glu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_glu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_glu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ug/cug3tl2jlnwqshzkisx4mst6uxoyc5sgz3jeqwxup5r7eoieamdp.py # Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # output => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%glu, %primals_1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/uz/cuzkkvrgtcbz6vvu6omkkiofk54nlyp34lpauxhrah7fmpyygjuq.py # Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # output => add_1, add_2, mul_3, mul_4, rsqrt, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%glu, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_4 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_8), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_9), kwargs = {}) triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (8, 4), (4, 1)) assert_size_stride(primals_7, (8, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), 
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_elu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.glu] triton_poi_fused_glu_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_2.run(buf4, primals_1, buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_3.run(buf4, primals_1, buf5, buf6, primals_8, primals_9, buf7, 256, grid=grid(256), stream=stream0) del buf5 del buf6 del primals_9 return (buf7, primals_1, primals_8, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 4, 4, 8), (128, 32, 8, 1), 0), buf4, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x class _GateAddNorm(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int'=None, skip_size: 'int'=None, trainable_add: 'bool'=False, dropout: 'float'=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size or input_size self.skip_size = skip_size or self.hidden_size self.dropout = dropout self.glu = _GatedLinearUnit(self.input_size, hidden_size=self. 
hidden_size, dropout=self.dropout) self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add) def forward(self, x, skip): output = self.glu(x) output = self.add_norm(output, skip) return output class _ResampleNorm(nn.Module): def __init__(self, input_size: 'int', output_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.output_size = output_size or input_size if self.input_size != self.output_size: self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.output_size) def forward(self, x: 'torch.Tensor') ->torch.Tensor: if self.input_size != self.output_size: x = self.resample(x) if self.trainable_add: x = x * self.gate(self.mask) * 2.0 output = self.norm(x) return output class _GatedResidualNetwork(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int', output_size: 'int', dropout: 'float'=0.1, context_size: 'int'=None, residual: 'bool'=False): super().__init__() self.input_size = input_size self.output_size = output_size self.context_size = context_size self.hidden_size = hidden_size self.dropout = dropout self.residual = residual if self.input_size != self.output_size and not self.residual: residual_size = self.input_size else: residual_size = self.output_size if self.output_size != residual_size: self.resample_norm = _ResampleNorm(residual_size, self.output_size) self.fc1 = nn.Linear(self.input_size, self.hidden_size) self.elu = nn.ELU() if self.context_size is not None: self.context = nn.Linear(self.context_size, self.hidden_size, bias=False) self.fc2 = nn.Linear(self.hidden_size, self.hidden_size) self.init_weights() self.gate_norm = _GateAddNorm(input_size=self.hidden_size, skip_size=self.output_size, hidden_size=self.output_size, dropout=self.dropout, trainable_add=False) def init_weights(self): for name, p in self.named_parameters(): if 'bias' in name: torch.nn.init.zeros_(p) elif 'fc1' in name or 'fc2' in name: torch.nn.init.kaiming_normal_(p, a=0, mode='fan_in', nonlinearity='leaky_relu') elif 'context' in name: torch.nn.init.xavier_uniform_(p) def forward(self, x, context=None, residual=None): if residual is None: residual = x if self.input_size != self.output_size and not self.residual: residual = self.resample_norm(residual) x = self.fc1(x) if context is not None: context = self.context(context) x = x + context x = self.elu(x) x = self.fc2(x) x = self.gate_norm(x, residual) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
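# --- Editor's note: hedged sketch, illustrative only. ---
# With input_size == hidden_size == output_size the resample path is skipped
# and the block reduces to LayerNorm(GLU(fc2(ELU(fc1(x)))) + x). Checking
# that unrolled form against the module (eval mode disables the GLU dropout):
if __name__ == "__main__":
    grn = _GatedResidualNetwork(input_size=4, hidden_size=4, output_size=4)
    grn.eval()
    x = get_inputs()[0]
    gated = F.glu(grn.gate_norm.glu.fc(grn.fc2(grn.elu(grn.fc1(x)))), dim=-1)
    ref = grn.gate_norm.add_norm(gated, x)
    print(torch.allclose(grn(x), ref, atol=1e-6))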
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_glu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 
* tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (8, 4), (4, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_glu_1[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(64)](buf4, primals_1, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(256)](buf4, primals_1, buf5, buf6, primals_8, primals_9, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 del primals_9 return buf7, primals_1, primals_8, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 4, 4, 8), (128, 32, 8, 1), 0), buf4, primals_6, primals_4 class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = 
_TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x class _GateAddNorm(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int'=None, skip_size: 'int'=None, trainable_add: 'bool'=False, dropout: 'float'=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size or input_size self.skip_size = skip_size or self.hidden_size self.dropout = dropout self.glu = _GatedLinearUnit(self.input_size, hidden_size=self. hidden_size, dropout=self.dropout) self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add) def forward(self, x, skip): output = self.glu(x) output = self.add_norm(output, skip) return output class _ResampleNorm(nn.Module): def __init__(self, input_size: 'int', output_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.output_size = output_size or input_size if self.input_size != self.output_size: self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.output_size) def forward(self, x: 'torch.Tensor') ->torch.Tensor: if self.input_size != self.output_size: x = self.resample(x) if self.trainable_add: x = x * self.gate(self.mask) * 2.0 output = self.norm(x) return output class _GatedResidualNetworkNew(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int', output_size: 'int', dropout: 'float'=0.1, context_size: 'int'=None, residual: 'bool'=False): super().__init__() self.input_size = input_size self.output_size = output_size self.context_size = context_size self.hidden_size = hidden_size self.dropout = dropout self.residual = residual if self.input_size != self.output_size and not self.residual: residual_size = self.input_size else: residual_size = self.output_size if self.output_size != residual_size: self.resample_norm = _ResampleNorm(residual_size, self.output_size) self.fc1 = nn.Linear(self.input_size, self.hidden_size) self.elu = nn.ELU() if self.context_size is not None: self.context = nn.Linear(self.context_size, self.hidden_size, bias=False) self.fc2 = nn.Linear(self.hidden_size, self.hidden_size) self.init_weights() self.gate_norm = _GateAddNorm(input_size=self.hidden_size, skip_size=self.output_size, hidden_size=self.output_size, dropout=self.dropout, 
trainable_add=False) def init_weights(self): for name, p in self.named_parameters(): if 'bias' in name: torch.nn.init.zeros_(p) elif 'fc1' in name or 'fc2' in name: torch.nn.init.kaiming_normal_(p, a=0, mode='fan_in', nonlinearity='leaky_relu') elif 'context' in name: torch.nn.init.xavier_uniform_(p) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.gate_norm.glu.fc.weight primals_7 = self.gate_norm.glu.fc.bias primals_8 = self.gate_norm.add_norm.norm.weight primals_9 = self.gate_norm.add_norm.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
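# A minimal smoke test for the record above, assuming a CUDA device and
# the class definitions in this file. Note the traced call() contains no
# dropout op, so the fused module matches the eager network's eval-mode
# behaviour; the helper name below is illustrative, not part of the record.
import torch

def _smoke_test_grn():
    grn = _GatedResidualNetworkNew(input_size=4, hidden_size=4,
        output_size=4, dropout=0.1).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = grn(x)
    # fc1 -> ELU -> fc2 -> GLU gate -> residual add -> LayerNorm(last dim),
    # so the output keeps the input shape.
    assert out.shape == (4, 4, 4, 4)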
amadejkocbek/darts
_GatedResidualNetwork
false
12112
[ "Apache-2.0" ]
0
074be2a76eee11258da066878c564badf40834e9
https://github.com/amadejkocbek/darts/tree/074be2a76eee11258da066878c564badf40834e9
SEModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 
(16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/o5/co5kpgkyaabh4nd7yz4gzpyl7x35mwdhgusbruykvtydzlq2lizg.py # Topologically Sorted Source Nodes: [x_se2, x_se2_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_se2 => convolution # x_se2_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py # Topologically Sorted Source Nodes: [x_se_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_se_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/lp/clprvnh5p6cmadxtwzizwydrpjlwxohxixbw4ntucp6srbu6gtis.py # Topologically Sorted Source Nodes: [x_se_2, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # x_se_2 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [x_se2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 0, 0), 0), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_se2, x_se2_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x_se_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_se_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_se_2, mul], Original ATen: [aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_3.run(primals_1, buf5, buf6, 256, grid=grid(256), stream=stream0) return (buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == 
"__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed


class FastAvgPool2d(nn.Module):

    def __init__(self, flatten=False):
        super(FastAvgPool2d, self).__init__()
        self.flatten = flatten

    def forward(self, x):
        if self.flatten:
            in_size = x.size()
            return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
        else:
            return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(
                0), x.size(1), 1, 1)


class SEModule(nn.Module):

    def __init__(self, channels, reduction_channels, inplace=True):
        super(SEModule, self).__init__()
        self.avg_pool = FastAvgPool2d()
        self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1,
            padding=0, bias=True)
        self.relu = nn.ReLU(inplace=inplace)
        self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1,
            padding=0, bias=True)
        self.activation = nn.Sigmoid()

    def forward(self, x):
        # Squeeze: global average pool to (N, C, 1, 1); excite: a 1x1
        # conv bottleneck; then rescale the input channel-wise.
        x_se = self.avg_pool(x)
        x_se2 = self.fc1(x_se)
        x_se2 = self.relu(x_se2)
        x_se = self.fc2(x_se2)
        x_se = self.activation(x_se)
        return x * x_se


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'reduction_channels': 4}]
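# FastAvgPool2d is a reshape-and-mean spelling of global average
# pooling, so the squeeze step can be cross-checked against the
# standard operator (the helper name is illustrative):
import torch
import torch.nn.functional as F

def _check_fast_avg_pool():
    x = torch.rand(4, 4, 4, 4)
    fast = FastAvgPool2d()(x)
    ref = F.adaptive_avg_pool2d(x, 1)
    assert torch.allclose(fast, ref, atol=1e-6)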
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torchvision import datasets as datasets import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 0, 0), 0), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(16)](buf3, primals_3, 16, XBLOCK=16, num_warps=1, 
num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf3, buf5 class FastAvgPool2d(nn.Module): def __init__(self, flatten=False): super(FastAvgPool2d, self).__init__() self.flatten = flatten def forward(self, x): if self.flatten: in_size = x.size() return x.view((in_size[0], in_size[1], -1)).mean(dim=2) else: return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1) class SEModuleNew(nn.Module): def __init__(self, channels, reduction_channels, inplace=True): super(SEModuleNew, self).__init__() self.avg_pool = FastAvgPool2d() self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1, padding=0, bias=True) self.relu = nn.ReLU(inplace=inplace) self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1, padding=0, bias=True) self.activation = nn.Sigmoid() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
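# SEModuleNew keeps the submodule names of SEModule, so the two can
# share a state dict; a quick numerical comparison of the fused and
# eager paths (a sketch, assuming a CUDA device and both class
# definitions above):
import torch

def _compare_se_modules():
    eager = SEModule(channels=4, reduction_channels=4).cuda().eval()
    fused = SEModuleNew(channels=4, reduction_channels=4).cuda().eval()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        assert torch.allclose(eager(x), fused(x), atol=1e-5)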
adam-dziedzic/ASL
SEModule
false
12113
[ "MIT" ]
0
cc063f5e7eda1498544ad2c3b224985203b0774a
https://github.com/adam-dziedzic/ASL/tree/cc063f5e7eda1498544ad2c3b224985203b0774a
PreActBlockNoBN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu] # Source node to ATen node mapping: # out => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/4e/c4efs56ymyev6yow4ruutakn3po5nni7rvtifmzxqreckdzecoje.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # out_1 => convolution # out_2 => relu_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/yl/cyl57twtgf3lzd5sst7snomgtzysir6mpvrzx6jm7k4lxpcq6sru.py # Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # out_3 => convolution_1 # out_4 => add # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {}) triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, 
math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_2.run(buf4, primals_5, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 del primals_5 return (buf4, primals_2, primals_4, buf0, buf2, 
) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class PreActBlockNoBN(nn.Module):
    """Pre-activation version of the BasicBlock."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlockNoBN, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=
            stride, padding=1)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
            padding=1)
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
                expansion * planes, kernel_size=1, stride=stride))

    def forward(self, x):
        # Pre-activation: ReLU precedes each convolution, and the
        # shortcut branches off before conv1.
        out = F.relu(x)
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = F.relu(out)
        out = self.conv2(out)
        out += shortcut
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_planes': 4, 'planes': 4}]
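# With stride=1 and in_planes == expansion * planes (the configuration
# get_init_inputs() produces), no `shortcut` attribute is created, so
# the residual is the raw input x rather than relu(x). A small check of
# that branch (helper name illustrative):
import torch

def _check_identity_shortcut():
    block = PreActBlockNoBN(in_planes=4, planes=4, stride=1)
    assert not hasattr(block, 'shortcut')
    x = torch.rand(4, 4, 4, 4)
    # out = conv2(relu(conv1(relu(x)))) + x, same spatial size as x.
    assert block(x).shape == x.shape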
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_add_convolution_2[grid(256)](buf4, primals_5, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_5 return buf4, primals_2, primals_4, buf0, buf2 class PreActBlockNoBNNew(nn.Module): """Pre-activation version of the BasicBlock.""" expansion = 1 def __init__(self, in_planes, planes, stride=1): super(PreActBlockNoBNNew, self).__init__() self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride= stride, padding=1) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1) if stride != 1 or in_planes != 
self.expansion * planes: self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self. expansion * planes, kernel_size=1, stride=stride)) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
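# A rough way to time the fused wrapper against the eager block
# (a sketch, assuming a CUDA device; at 4x4x4x4, kernel-launch overhead
# dominates, so the absolute numbers mean little):
import time
import torch

def _bench(module, x, iters=100):
    torch.cuda.synchronize()
    t0 = time.perf_counter()
    with torch.no_grad():
        for _ in range(iters):
            module(x)
    torch.cuda.synchronize()
    return (time.perf_counter() - t0) / iters

def _compare_preact_blocks():
    eager = PreActBlockNoBN(4, 4).cuda().eval()
    fused = PreActBlockNoBNNew(4, 4).cuda().eval()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(_bench(eager, x), _bench(fused, x))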
arhik/LoCo
PreActBlockNoBN
false
12114
[ "MIT" ]
0
de3792a8c5650ee1efa0682ad494a3b1b1be3dd0
https://github.com/arhik/LoCo/tree/de3792a8c5650ee1efa0682ad494a3b1b1be3dd0
up
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/vz/cvzodr5gcejdys7tscy26lmdqza3z2erfgo6btt332sh43xnk5w6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%constant_pad_nd, %primals_4], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) % 8 x1 = (xindex // 4) % 4 x0 = xindex % 4 x3 = (xindex // 128) x6 = xindex % 16 x7 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, 
tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 2 + x1 tmp6 = tmp5 >= tmp1 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp5 < tmp7 tmp9 = 2 + x0 tmp10 = tmp9 >= tmp1 tmp11 = tmp9 < tmp7 tmp12 = tmp6 & tmp8 tmp13 = tmp12 & tmp10 tmp14 = tmp13 & tmp11 tmp15 = tmp14 & tmp4 tmp16 = tl.load(in_ptr0 + (18 + x0 + (8*x1) + (64*x2) + (256*x3)), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + (x2), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp16 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp15, tmp18, tmp19) tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype) tmp22 = tl.where(tmp4, tmp20, tmp21) tmp23 = tmp0 >= tmp3 tmp24 = tmp0 < tmp7 tmp25 = tl.load(in_ptr2 + (x6 + (16*((-4) + x2)) + (64*x3)), tmp23 & xmask, other=0.0) tmp26 = tl.where(tmp4, tmp22, tmp25) tl.store(out_ptr0 + (x7), tmp26, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1)) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_2, primals_4, buf1, 512, grid=grid(512), stream=stream0) del buf0 del primals_2 del primals_4 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F


class up(nn.Module):

    def __init__(self, in_ch, out_ch):
        super(up, self).__init__()
        self.up_scale = nn.ConvTranspose2d(in_ch, out_ch, 2, stride=2)

    def forward(self, x1, x2):
        x2 = self.up_scale(x2)
        # Pad (or, when the diffs are negative, crop) x2 to x1's spatial
        # size before concatenating along the channel dimension.
        diffY = x1.size()[2] - x2.size()[2]
        diffX = x1.size()[3] - x2.size()[3]
        x2 = F.pad(x2, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY -
            diffY // 2])
        x = torch.cat([x2, x1], dim=1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_ch': 4, 'out_ch': 4}]
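# With the 4x4 test inputs the "pad" actually crops: ConvTranspose2d
# with kernel 2, stride 2 maps 4x4 to 8x8, so diffX = diffY = -4 and
# F.pad receives [-2, -2, -2, -2], trimming two rows and columns from
# each border. That crop is what the `18 + x0 + 8 * x1` offset in the
# fused kernel below encodes (18 = 2 * 8 + 2). Negative padding can be
# checked in isolation:
import torch
import torch.nn.functional as F

def _check_negative_pad():
    t = torch.arange(64, dtype=torch.float32).view(1, 1, 8, 8)
    cropped = F.pad(t, [-2, -2, -2, -2])  # negative pad == crop
    assert cropped.shape == (1, 1, 4, 4)
    assert cropped[0, 0, 0, 0].item() == 18.0  # element (2, 2) of the 8x8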
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 8 x1 = xindex // 4 % 4 x0 = xindex % 4 x3 = xindex // 128 x6 = xindex % 16 x7 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 2 + x1 tmp6 = tmp5 >= tmp1 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp5 < tmp7 tmp9 = 2 + x0 tmp10 = tmp9 >= tmp1 tmp11 = tmp9 < tmp7 tmp12 = tmp6 & tmp8 tmp13 = tmp12 & tmp10 tmp14 = tmp13 & tmp11 tmp15 = tmp14 & tmp4 tmp16 = tl.load(in_ptr0 + (18 + x0 + 8 * x1 + 64 * x2 + 256 * x3), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x2, tmp15 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp16 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp15, tmp18, tmp19) tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype) tmp22 = tl.where(tmp4, tmp20, tmp21) tmp23 = tmp0 >= tmp3 tmp25 = tl.load(in_ptr2 + (x6 + 16 * (-4 + x2) + 64 * x3), tmp23 & xmask, other=0.0) tmp26 = tl.where(tmp4, tmp22, tmp25) tl.store(out_ptr0 + x7, tmp26, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1)) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](buf0, primals_2, primals_4, buf1, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 del primals_4 return buf1, primals_1, primals_3 class upNew(nn.Module): def __init__(self, in_ch, out_ch): super(upNew, self).__init__() self.up_scale = nn.ConvTranspose2d(in_ch, out_ch, 2, stride=2) def forward(self, input_0, input_1): primals_1 = self.up_scale.weight primals_2 = self.up_scale.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
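# triton_poi_fused_cat_0 folds the bias add, the crop, and the channel
# concatenation into one pass over the (4, 8, 4, 4) output. Its index
# arithmetic can be replayed in plain Python, which is a handy way to
# sanity-check hand-written Triton indexing (a sketch, assuming the
# row-major output strides (128, 16, 4, 1) used above):
def cat_source_index(xindex):
    w = xindex % 4          # x0
    h = xindex // 4 % 4     # x1
    c = xindex // 16 % 8    # x2
    n = xindex // 128       # x3
    if c < 4:
        # First half of the channels: cropped ConvTranspose2d output
        # (the bias is loaded separately, once per channel).
        return 'conv', 18 + w + 8 * h + 64 * c + 256 * n
    # Second half: the skip tensor copied through unchanged.
    return 'skip', xindex % 16 + 16 * (c - 4) + 64 * n

assert cat_source_index(0) == ('conv', 18)
assert cat_source_index(64) == ('skip', 0)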
aribryan/segmentation_revisit
up
false
12115
[ "MIT" ]
0
a37747cfa7bfa7bfd4c0c01983421f632cd719ba
https://github.com/aribryan/segmentation_revisit/tree/a37747cfa7bfa7bfd4c0c01983421f632cd719ba
ResnetBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/wo/cwo5hzyj7r5kfs5qkbujhau55erj2h3367t3krgxxma4ysrszby7.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # out => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %primals_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/vo/cvo56aotw4yuhuax6oyrf43t5ssqhzuwodjmjfylt42bqssid7vq.py # Topologically Sorted Source Nodes: [dx, out_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # dx => convolution # out_1 => gt_1, mul_1, where_1 # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution, %mul_1), kwargs = {}) triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr1 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/s2/cs2vpvzozkgzsyza3q4itao3gysgfuswe2uukw2smxzkcuqqiszu.py # Topologically Sorted Source Nodes: [dx_1, mul, out_2], Original ATen: [aten.convolution, aten.mul, aten.add] # Source node to ATen node mapping: # dx_1 => convolution_1 # mul => mul_2 # out_2 => add # Graph fragment: # %convolution_1 : 
[num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul_2), kwargs = {}) triton_poi_fused_add_convolution_mul_2 = async_compile.triton('triton_poi_fused_add_convolution_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_out_ptr0 + (x3), xmask) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.1 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(in_out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [dx], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 
1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dx, out_1], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_3, buf2, buf3, 256, grid=grid(256), stream=stream0) del buf1 del primals_3 # Topologically Sorted Source Nodes: [dx_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [dx_1, mul, out_2], Original ATen: [aten.convolution, aten.mul, aten.add] triton_poi_fused_add_convolution_mul_2.run(buf5, primals_1, primals_5, 256, grid=grid(256), stream=stream0) del primals_1 del primals_5 return (buf5, primals_2, primals_4, buf0, buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
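# --- Editor's note: a hedged, minimal eager-mode sketch of what the three
# fused kernels above compute, so the kernel math can be checked by eye.
# `reference_resnet_block` is an illustrative helper name, not part of the
# generated module; (w0, b0, w1, b1) correspond to primals_2..primals_5.
import torch.nn.functional as F

def reference_resnet_block(x, w0, b0, w1, b1):
    # triton_poi_fused_leaky_relu_0: out = leaky_relu(x, 0.2)
    h = F.leaky_relu(x, 0.2)
    # extern conv (bias=None) + triton_poi_fused_convolution_leaky_relu_1:
    # add the channel bias, apply leaky_relu(., 0.2), and keep the >0
    # mask (buf2) for the backward pass
    h = F.leaky_relu(F.conv2d(h, w0, b0, padding=1), 0.2)
    # extern conv (bias=None) + triton_poi_fused_add_convolution_mul_2:
    # out = x + 0.1 * (conv(h) + bias), written in place into buf5
    return x + 0.1 * F.conv2d(h, w1, b1, padding=1)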
import torch from torch import nn from torch.nn import functional as F import torch.utils.data import torch.utils.data.distributed def actvn(x): out = F.leaky_relu(x, 0.2) return out class ResnetBlock(nn.Module): def __init__(self, fin, fout, fhidden=None, is_bias=True): super().__init__() self.is_bias = is_bias self.learned_shortcut = fin != fout self.fin = fin self.fout = fout if fhidden is None: self.fhidden = min(fin, fout) else: self.fhidden = fhidden self.conv_0 = nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1) self.conv_1 = nn.Conv2d(self.fhidden, self.fout, 3, stride=1, padding=1, bias=is_bias) if self.learned_shortcut: self.conv_s = nn.Conv2d(self.fin, self.fout, 1, stride=1, padding=0, bias=False) def forward(self, x): x_s = self._shortcut(x) dx = self.conv_0(actvn(x)) dx = self.conv_1(actvn(dx)) out = x_s + 0.1 * dx return out def _shortcut(self, x): if self.learned_shortcut: x_s = self.conv_s(x) else: x_s = x return x_s def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'fin': 4, 'fout': 4}]
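# --- Editor's note: a hedged usage sketch wiring get_init_inputs() and
# get_inputs() together the way a test harness presumably would;
# `_example_usage` is an illustrative name, not part of the original module.
def _example_usage():
    init_args, init_kwargs = get_init_inputs()
    block = ResnetBlock(*init_args, **init_kwargs)
    x, = get_inputs()
    out = block(x)
    # fin == fout == 4, so the shortcut is the identity and shapes are preserved
    assert out.shape == x.shape == (4, 4, 4, 4)
    return out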
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.nn import functional as F import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_add_convolution_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.1 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(in_out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1, primals_3, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_add_convolution_mul_2[grid(256)](buf5, primals_1, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_5 return buf5, primals_2, primals_4, buf0, buf2, buf3 def actvn(x): out = F.leaky_relu(x, 0.2) return 
out class ResnetBlockNew(nn.Module): def __init__(self, fin, fout, fhidden=None, is_bias=True): super().__init__() self.is_bias = is_bias self.learned_shortcut = fin != fout self.fin = fin self.fout = fout if fhidden is None: self.fhidden = min(fin, fout) else: self.fhidden = fhidden self.conv_0 = nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1) self.conv_1 = nn.Conv2d(self.fhidden, self.fout, 3, stride=1, padding=1, bias=is_bias) if self.learned_shortcut: self.conv_s = nn.Conv2d(self.fin, self.fout, 1, stride=1, padding=0, bias=False) def _shortcut(self, x): if self.learned_shortcut: x_s = self.conv_s(x) else: x_s = x return x_s def forward(self, input_0): primals_2 = self.conv_0.weight primals_3 = self.conv_0.bias primals_4 = self.conv_1.weight primals_5 = self.conv_1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
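# --- Editor's note: a hedged sanity check, assuming a CUDA device is
# available (the call() wrapper above is CUDA-only). It re-derives the
# expected output in eager mode from the block's own weights and compares it
# with the fused forward; `_check_resnet_block_new` is an illustrative name.
def _check_resnet_block_new():
    block = ResnetBlockNew(fin=4, fout=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # eager recomputation of x_s + 0.1 * conv_1(actvn(conv_0(actvn(x))))
    expected = x + 0.1 * block.conv_1(actvn(block.conv_0(actvn(x))))
    torch.testing.assert_close(block(x), expected, rtol=1e-4, atol=1e-4)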
arnabgho/GAN_stability
ResnetBlock
false
12116
[ "MIT" ]
0
5037d1d856be58818d1c825cd415e0d90d90aff2
https://github.com/arnabgho/GAN_stability/tree/5037d1d856be58818d1c825cd415e0d90d90aff2